commit: stringlengths (40-40)
subject: stringlengths (1-3.25k)
old_file: stringlengths (4-311)
new_file: stringlengths (4-311)
old_contents: stringlengths (0-26.3k)
lang: stringclasses (3 values)
proba: float64 (0-1)
diff: stringlengths (0-7.82k)
e8b757973c342e5afde3cc178bc812db2fd411e2
fix automatic reporting clearing the device type and status information
cmdb/api.py
cmdb/api.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# 2017.3 update by [email protected]
from django.http import HttpResponse
from models import Host, HostGroup, ASSET_TYPE, ASSET_STATUS
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.views.decorators.csrf import csrf_exempt
from lib.common import token_verify
from lib.deploy_key import deploy_key
import logging
from lib.log import log
from config.views import get_dir

try:
    import json
except ImportError, e:
    import simplejson as json


def str2gb(args):
    """
    :param args:
    :return: GB2312 encoding
    """
    return str(args).encode('gb2312')


def get_object(model, **kwargs):
    """
    use this wrapper function to query the database
    """
    for value in kwargs.values():
        if not value:
            return None
    the_object = model.objects.filter(**kwargs)
    if len(the_object) == 1:
        the_object = the_object[0]
    else:
        the_object = None
    return the_object


def page_list_return(total, current=1):
    """
    pagination: return the list of page numbers, from the smallest to the
    largest page shown for the current page
    """
    min_page = current - 2 if current - 4 > 0 else 1
    max_page = min_page + 4 if min_page + 4 < total else total
    return range(min_page, max_page + 1)


def pages(post_objects, request):
    """
    public pagination function, returns the page object tuple
    """
    paginator = Paginator(post_objects, 65535)
    try:
        current_page = int(request.GET.get('page', '1'))
    except ValueError:
        current_page = 1
    page_range = page_list_return(len(paginator.page_range), current_page)
    try:
        page_objects = paginator.page(current_page)
    except (EmptyPage, InvalidPage):
        page_objects = paginator.page(paginator.num_pages)
    if current_page >= 5:
        show_first = 1
    else:
        show_first = 0
    if current_page <= (len(paginator.page_range) - 3):
        show_end = 1
    else:
        show_end = 0
    # all objects, paginator, objects on this page, all page numbers,
    # current page number, show first page?, show last page?
    return post_objects, paginator, page_objects, page_range, current_page, show_first, show_end


@csrf_exempt
@token_verify()
def collect(request):
    asset_info = json.loads(request.body)
    if request.method == 'POST':
        vendor = asset_info['vendor']
        # group = asset_info['group']
        disk = asset_info['disk']
        cpu_model = asset_info['cpu_model']
        cpu_num = asset_info['cpu_num']
        memory = asset_info['memory']
        sn = asset_info['sn']
        osver = asset_info['osver']
        hostname = asset_info['hostname']
        ip = asset_info['ip']
        asset_type = ""
        status = ""
        try:
            host = Host.objects.get(hostname=hostname)
        except Exception as msg:
            print(msg)
            host = Host()
            level = get_dir("log_level")
            ssh_pwd = get_dir("ssh_pwd")
            log_path = get_dir("log_path")
            log("cmdb.log", level, log_path)
            logging.info("==========sshkey deploy start==========")
            data = deploy_key(ip, ssh_pwd)
            logging.info(data)
            logging.info("==========sshkey deploy end==========")
        # if req.POST.get('identity'):
        #     identity = req.POST.get('identity')
        #     try:
        #         host = Host.objects.get(identity=identity)
        #     except:
        #         host = Host()
        host.hostname = hostname
        # host.group = group
        host.cpu_num = int(cpu_num)
        host.cpu_model = cpu_model
        host.memory = int(memory)
        host.sn = sn
        host.disk = disk
        host.os = osver
        host.vendor = vendor
        host.ip = ip
        host.asset_type = asset_type
        host.status = status
        host.save()
        return HttpResponse("Post asset data to server successfully!")
    else:
        return HttpResponse("No any post data!")


@token_verify()
def get_host(request):
    d = []
    try:
        hostname = request.GET['name']
    except Exception as msg:
        return HttpResponse(msg, status=404)
    if hostname == "all":
        all_host = Host.objects.all()
        ret_host = {'hostname': hostname, 'members': []}
        for h in all_host:
            ret_h = {'hostname': h.hostname, 'ipaddr': h.ip}
            ret_host['members'].append(ret_h)
        d.append(ret_host)
        return HttpResponse(json.dumps(d))
    else:
        try:
            host = Host.objects.get(hostname=hostname)
            data = {'hostname': host.hostname, 'ip': host.ip}
            return HttpResponse(json.dumps({'status': 0, 'message': 'ok', 'data': data}))
        except Exception as msg:
            return HttpResponse(msg, status=404)


@token_verify()
def get_group(request):
    if request.method == 'GET':
        d = []
        try:
            group_name = request.GET['name']
        except Exception as msg:
            return HttpResponse(msg)
        if group_name == "all":
            host_groups = HostGroup.objects.all()
            for hg in host_groups:
                ret_hg = {'host_group': hg.name, 'members': []}
                members = Host.objects.filter(group__name=hg)
                for h in members:
                    ret_h = {'hostname': h.hostname, 'ipaddr': h.ip}
                    ret_hg['members'].append(ret_h)
                d.append(ret_hg)
            return HttpResponse(json.dumps(d))
        else:
            ret_hg = {'host_group': group_name, 'members': []}
            members = Host.objects.filter(group__name=group_name)
            for h in members:
                ret_h = {'hostname': h.hostname, 'ipaddr': h.ip}
                ret_hg['members'].append(ret_h)
            d.append(ret_hg)
            return HttpResponse(json.dumps(d))
    return HttpResponse(status=403)
Python
0
@@ -2594,16 +2594,18 @@ %0A + # asset_t @@ -2620,16 +2620,18 @@ %0A + # status @@ -3677,24 +3677,26 @@ = ip%0A + # host.asset_ @@ -3716,24 +3716,26 @@ type%0A + # host.status
59223d2e694530f6f779054e68bf5cf02a4c210e
fix pep8, localflavor/br/forms.py:41:83: W291 trailing whitespace
localflavor/br/forms.py
localflavor/br/forms.py
# -*- coding: utf-8 -*- """ BR-specific Form helpers """ from __future__ import absolute_import, unicode_literals import re from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import Field, RegexField, CharField, Select from django.utils.translation import ugettext_lazy as _ try: from django.utils.encoding import smart_text except ImportError: from django.utils.encoding import smart_unicode as smart_text from .br_states import STATE_CHOICES phone_digits_re = re.compile(r'^(\d{2})[-\.]?(\d{4,5})[-\.]?(\d{4})$') class BRZipCodeField(RegexField): """ A form field that validates input as a Brazilian zip code, with the format XXXXX-XXX. """ default_error_messages = { 'invalid': _('Enter a zip code in the format XXXXX-XXX.'), } def __init__(self, max_length=None, min_length=None, *args, **kwargs): super(BRZipCodeField, self).__init__(r'^\d{5}-\d{3}$', max_length, min_length, *args, **kwargs) class BRPhoneNumberField(Field): """ A form field that validates input as a Brazilian phone number, that must be in either of the following formats: XX-XXXX-XXXX or XX-XXXXX-XXXX. """ default_error_messages = { 'invalid': _(('Phone numbers must be in either of the following ' 'formats: XX-XXXX-XXXX or XX-XXXXX-XXXX.')), } def clean(self, value): super(BRPhoneNumberField, self).clean(value) if value in EMPTY_VALUES: return '' value = re.sub('(\(|\)|\s+)', '', smart_text(value)) m = phone_digits_re.search(value) if m: return '%s-%s-%s' % (m.group(1), m.group(2), m.group(3)) raise ValidationError(self.error_messages['invalid']) class BRStateSelect(Select): """ A Select widget that uses a list of Brazilian states/territories as its choices. """ def __init__(self, attrs=None): super(BRStateSelect, self).__init__(attrs, choices=STATE_CHOICES) class BRStateChoiceField(Field): """ A choice field that uses a list of Brazilian states as its choices. """ widget = Select default_error_messages = { 'invalid': _('Select a valid brazilian state. That state is not one of the available states.'), } def __init__(self, required=True, widget=None, label=None, initial=None, help_text=None): super(BRStateChoiceField, self).__init__(required, widget, label, initial, help_text) self.widget.choices = STATE_CHOICES def clean(self, value): value = super(BRStateChoiceField, self).clean(value) if value in EMPTY_VALUES: value = '' value = smart_text(value) if value == '': return value valid_values = set([smart_text(k) for k, v in self.widget.choices]) if value not in valid_values: raise ValidationError(self.error_messages['invalid']) return value def DV_maker(v): if v >= 2: return 11 - v return 0 class BRCPFField(CharField): """ A form field that validates a CPF number or a CPF string. A CPF number is compounded by XXX.XXX.XXX-VD. The two last digits are check digits. More information: http://en.wikipedia.org/wiki/Cadastro_de_Pessoas_F%C3%ADsicas """ default_error_messages = { 'invalid': _("Invalid CPF number."), 'max_digits': _("This field requires at most 11 digits or 14 characters."), 'digits_only': _("This field requires only numbers."), } def __init__(self, max_length=14, min_length=11, *args, **kwargs): super(BRCPFField, self).__init__(max_length, min_length, *args, **kwargs) def clean(self, value): """ Value can be either a string in the format XXX.XXX.XXX-XX or an 11-digit number. 
""" value = super(BRCPFField, self).clean(value) if value in EMPTY_VALUES: return '' orig_value = value[:] if not value.isdigit(): value = re.sub("[-\.]", "", value) try: int(value) except ValueError: raise ValidationError(self.error_messages['digits_only']) if len(value) != 11: raise ValidationError(self.error_messages['max_digits']) orig_dv = value[-2:] new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(10, 1, -1))]) new_1dv = DV_maker(new_1dv % 11) value = value[:-2] + str(new_1dv) + value[-1] new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(11, 1, -1))]) new_2dv = DV_maker(new_2dv % 11) value = value[:-1] + str(new_2dv) if value[-2:] != orig_dv: raise ValidationError(self.error_messages['invalid']) return orig_value class BRCNPJField(Field): """ A form field that validates input as `Brazilian CNPJ`_. Input can either be of the format XX.XXX.XXX/XXXX-XX or be a group of 14 digits. .. _Brazilian CNPJ: http://en.wikipedia.org/wiki/National_identification_number#Brazil """ default_error_messages = { 'invalid': _("Invalid CNPJ number."), 'digits_only': _("This field requires only numbers."), 'max_digits': _("This field requires at least 14 digits"), } def clean(self, value): """ Value can be either a string in the format XX.XXX.XXX/XXXX-XX or a group of 14 characters. """ value = super(BRCNPJField, self).clean(value) if value in EMPTY_VALUES: return '' orig_value = value[:] if not value.isdigit(): value = re.sub("[-/\.]", "", value) try: int(value) except ValueError: raise ValidationError(self.error_messages['digits_only']) if len(value) != 14: raise ValidationError(self.error_messages['max_digits']) orig_dv = value[-2:] new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(list(range(5, 1, -1)) + list(range(9, 1, -1)))]) new_1dv = DV_maker(new_1dv % 11) value = value[:-2] + str(new_1dv) + value[-1] new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(list(range(6, 1, -1)) + list(range(9, 1, -1)))]) new_2dv = DV_maker(new_2dv % 11) value = value[:-1] + str(new_2dv) if value[-2:] != orig_dv: raise ValidationError(self.error_messages['invalid']) return orig_value
Python
0.000001
@@ -1183,20 +1183,19 @@ must +%0A be in -%0A eith
91178909bab31e9db42d86d5783152890f65795d
update cms
cms/urls.py
cms/urls.py
from django.conf.urls import url
from cms import views
from django.contrib.auth import views as auth_views

urlpatterns = [
    # list
    url(r'^dailyreport/$', views.daily_list, name='daily_list'),
    # daily report operations
    url(r'^dailyreport/add/$', views.daily_edit, name='daily_add'),  # create
    url(r'^dailyreport/mod/(?P<daily_id>\d+)/$', views.daily_edit, name='daily_mod'),  # edit
    url(r'^dailyreport/del/(?P<daily_id>\d+)/$', views.daily_del, name='daily_del'),  # delete
    # comment operations
    url(r'^dailyreport/comment/add/(?P<daily_id>\d+)/$', views.comment_edit, name='comment_add'),  # create
    url(r'^dailyreport/comment/mod/(?P<daily_id>\d+)/(?P<impression_id>\d+)/$', views.comment_edit, name='comment_mod'),  # edit
    # detail
    url(r'^dailyreport/detail/(?P<daily_id>\d+)/$', views.daily_detail.as_view, name='daily_detail'),
]
Python
0
@@ -638,18 +638,15 @@ (?P%3C -impression +comment _id%3E @@ -740,32 +740,26 @@ /detail/(?P%3C -daily_id +pk %3E%5Cd+)/$', vi @@ -782,16 +782,18 @@ .as_view +() , name='
1e6697fbeb6cf404d9a542287184c981c3c9e6aa
Add gzip flag to count
codonpdx.py
codonpdx.py
#!/usr/bin/env python

import argparse
import codonpdx.calc
import codonpdx.count
import codonpdx.insert
import codonpdx.mirror
import codonpdx.queueJobs
import sys

# create the top-level parser
parser = argparse.ArgumentParser(prog='codonpdx',
                                 description='Codonpdx command line utility.')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
subparsers = parser.add_subparsers(help='Sub-command descriptions:')

# create the parser for the "count" command
parserCount = subparsers.add_parser(
    'count',
    help='Count the codons of a file and produce JSON '
    'output containing the results.'
)
parserCount.add_argument(
    '-i',
    '--infile',
    nargs='?',
    type=argparse.FileType('r'),
    default=sys.stdin,
    help='A file containing sequence data.'
)
parserCount.add_argument(
    '-j',
    '--job',
    required=True,
    help='The UUID for the job if this process is placing its results into '
    'the results table.'
)
parserCount.add_argument(
    '-f',
    '--format',
    choices=['fasta', 'genbank'],
    help='The file format.'
)
parserCount.add_argument(
    '-p',
    '--pretty',
    action='store_true',
    help='Print the JSON in a pretty, more human-readable way.'
)
parserCount.add_argument(
    '-o',
    '--output',
    nargs='?',
    type=argparse.FileType('w'),
    default=sys.stdout,
    help='Where to place the output JSON.'
)
parserCount.set_defaults(
    func=codonpdx.count.count
)

# create the parser for the "insert" command
parserLoadDB = subparsers.add_parser(
    'insert',
    help='Insert organism codon count JSON information into the database.'
)
parserLoadDB.add_argument(
    '-d',
    '--dbname',
    choices=['refseq', 'genbank', 'input'],
    help='The database table to store the count information in.'
)
parserLoadDB.add_argument(
    '-i',
    '--infile',
    nargs='?',
    type=argparse.FileType('r'),
    default=sys.stdin,
    help='The file to read the JSON data from. Defaults to standard input.'
)
parserLoadDB.add_argument(
    '-j',
    '--job',
    required=True,
    help='The UUID for the job if this process is placing its results into '
    'the results table.'
)
parserLoadDB.set_defaults(
    func=codonpdx.insert.insert
)

# create the parser for the "calc" command
parserCalcScore = subparsers.add_parser(
    'calc',
    help='Compare an organism to all other organisms in a given sequence '
    'database.'
)
parserCalcScore.add_argument(
    '-d',
    '--dbname',
    choices=['refseq', 'genbank'],
    help='The sequence database to compare the organism to.'
)
parserCalcScore.add_argument(
    '-v',
    '--virus',
    required=True,
    help='The accession.version number of the organism to compare.'
)
parserCalcScore.add_argument(
    '-w',
    '--virusdb',
    choices=['input', 'refseq', 'genbank'],
    default='input',
    help='The database table where the input virus resides.'
)
parserCalcScore.add_argument(
    '-o',
    '--output',
    action='store_true',
    help='Output scores to stdout instead of storing in the results table.'
)
parserCalcScore.add_argument(
    '-j',
    '--job',
    required=True,
    help='The UUID for the job if this process is placing its results into '
    'the results table.'
)
parserCalcScore.set_defaults(
    func=codonpdx.calc.calc
)

# create the mirror subcommand
parserMirror = subparsers.add_parser(
    'mirror',
    help='Mirror remote codon repository'
)
parserMirror.add_argument(
    '-d',
    '--dbname',
    required=True,
    choices=['refseq', 'genbank'],
    help='The repository to mirror'
)
parserMirror.set_defaults(
    func=codonpdx.mirror.mirror
)

# create the queueJobs subcommand
parserQueueJobs = subparsers.add_parser(
    'queueJobs',
    help='Count and load repository codon counts into postgres'
)
parserQueueJobs.add_argument(
    '-d',
    '--dbname',
    required=True,
    choices=['refseq', 'genbank'],
    help='The repository to parse'
)
parserQueueJobs.add_argument(
    '-f',
    '--format',
    choices=['fasta', 'genbank'],
    help='The file format.'
)
parserQueueJobs.set_defaults(
    func=codonpdx.queueJobs.queueJobs
)

args = parser.parse_args()
args.func(args)
Python
0
@@ -844,32 +844,174 @@ t.add_argument(%0A + '-g',%0A '--gzipped',%0A action='store_true',%0A default=False,%0A help='Indicates the input is gzipped.'%0A)%0AparserCount.add_argument(%0A '-j',%0A '-
b7d29e2a67c314b5d1aff343eef1a9ca2c3b0cbe
add dbl integration
cogs/dbl.py
cogs/dbl.py
import dbl
from cogs.cog import Cog
import logging
import asyncio
from threading import Thread

logger = logging.getLogger('debug')


class DBApi(Cog):
    def __init__(self, bot):
        super().__init__(bot)
        self._token = self.bot.config.dbl_token
        self.dbl = dbl.Client(self.bot, self._token)
        if not self.bot.test_mode:
            self.update_task = self.bot.loop.create_task(self.update_stats())

        try:
            from sanic import Sanic
            from sanic.response import json
        except ImportError:
            return

        self.server = Thread()

    async def update_stats(self):
        while True:
            logger.info('Posting server count')
            try:
                await self.dbl.post_server_count()
                logger.info(f'Posted server count {len(self.bot.guilds)}')
            except Exception as e:
                logger.exception(f'Failed to post server count\n{e}')

            await asyncio.sleep(3600)

    def run_webhook_server(self, main_loop):
        asyncio.new_event_loop()
        app = Sanic()

        @app.route("/webhook", methods=["POST", ])
        async def webhook(request):
            if request.headers.get('Authorization') != self.bot.config.dbl_auth:
                logger.warning('Unauthorized webhook access')
                return

            js = request.json
            main_loop.create_task(self.on_vote(int(js['bot']), int(js['user']),
                                               js['type'], js['isWeekend']))
            return json({'a': 'a'}, status=200)

        if __name__ == "__main__":
            app.run(host=self.bot.config.dbl_server, port=self.bot.config.dbl_port)

    async def on_vote(self, bot: int, user: int, type: str, is_weekend: bool):
        print(f'{user} voted on bot {bot}')


def setup(bot):
    bot.add_cog(DBApi(bot))
Python
0
@@ -613,19 +613,102 @@ Thread( -)%0A%0A +target=self.run_webhook_server, args=(self.bot.loop,))%0A self.server.start() %0A%0A as
da9f66dc6887895851132190cc82d51b42d57b6f
Add normalize_space option to StringField
grab/item/field.py
grab/item/field.py
from abc import ABCMeta, abstractmethod
from datetime import datetime

from ..tools.lxml_tools import clean_html
from ..tools.text import find_number
from .decorator import default, empty, cached, bind_item
from .const import NULL


class Field(object):
    """
    All custom fields should extend this class, and override the get method.
    """
    __metaclass__ = ABCMeta

    def __init__(self, xpath=None, default=NULL, empty_default=NULL,
                 processor=None, **kwargs):
        self.xpath_exp = xpath
        self.default = default
        self.empty_default = empty_default
        self.processor = processor

    @abstractmethod
    def __get__(self, obj, objtype):
        pass

    def __set__(self, obj, value):
        obj._cache[self.attr_name] = value

    def process(self, value):
        if self.processor:
            return self.processor(value)
        else:
            return value


class NullField(Field):
    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        return self.process(None)


class ItemListField(Field):
    def __init__(self, xpath, item_cls, *args, **kwargs):
        self.item_cls = item_cls
        super(ItemListField, self).__init__(xpath, *args, **kwargs)

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        subitems = []
        for sel in item._selector.select(self.xpath_exp):
            subitem = self.item_cls(sel.node)
            subitem._parse()
            subitems.append(subitem)
        return self.process(subitems)


class IntegerField(Field):
    def __init__(self, *args, **kwargs):
        self.ignore_spaces = kwargs.get('ignore_spaces', False)
        self.ignore_chars = kwargs.get('ignore_chars', None)
        super(IntegerField, self).__init__(*args, **kwargs)

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        value = item._selector.select(self.xpath_exp).text()
        if self.empty_default is not NULL:
            if value == "":
                return self.empty_default
        return find_number(self.process(value),
                           ignore_spaces=self.ignore_spaces,
                           ignore_chars=self.ignore_chars)


class StringField(Field):
    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        value = item._selector.select(self.xpath_exp).text()
        return self.process(value)


class HTMLField(Field):
    def __init__(self, *args, **kwargs):
        self.safe_attrs = kwargs.pop('safe_attrs', None)
        super(HTMLField, self).__init__(*args, **kwargs)

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        value = item._selector.select(self.xpath_exp).html()
        if self.safe_attrs is not None:
            return self.process(clean_html(value, output_encoding='unicode'))
        else:
            return self.process(value)


class ChoiceField(Field):
    def __init__(self, *args, **kwargs):
        self.choices = kwargs.pop('choices')
        super(ChoiceField, self).__init__(*args, **kwargs)

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        value = item._selector.select(self.xpath_exp).text()
        try:
            return self.process(self.choices[value])
        except KeyError:
            raise ChoiceFieldError('Unknown choice: %s' % value)


class RegexField(Field):
    def __init__(self, xpath, regex, *args, **kwargs):
        self.regex = regex
        super(RegexField, self).__init__(xpath, *args, **kwargs)

    @cached
    @default
    @bind_item
    def __get__(self, item, itemtype):
        value = item._selector.select(self.xpath_exp).text()
        match = self.regex.search(value)
        if match:
            return self.process(match.group(1))
        else:
            raise DataNotFound('Could not find regex')


class DateTimeField(Field):
    def __init__(self, xpath, datetime_format, *args, **kwargs):
        self.datetime_format = datetime_format
        super(DateTimeField, self).__init__(xpath, *args, **kwargs)

    @cached
    @default
    @bind_item
    def __get__(self, item, itemtype):
        value = item._selector.select(self.xpath_exp).text()
        return datetime.strptime(self.process(value),
                                 self.datetime_format)


class FuncField(Field):
    def __init__(self, func, pass_item=False, *args, **kwargs):
        self.func = func
        self.pass_item = pass_item
        super(FuncField, self).__init__(*args, **kwargs)

    @cached
    @default
    @bind_item
    def __get__(self, item, itemtype):
        if self.pass_item:
            val = self.func(item, item._selector)
        else:
            val = self.func(item._selector)
        return self.process(val)
Python
0.000004
@@ -2251,32 +2251,200 @@ ngField(Field):%0A + def __init__(self, *args, **kwargs):%0A self.normalize_space = kwargs.pop('normalize_space', True)%0A super(StringField, self).__init__(*args, **kwargs)%0A%0A @cached%0A @@ -2562,38 +2562,96 @@ (self.xpath_exp) -.text( +%5C%0A .text(normalize_space=self.normalize_space )%0A return
0b16c74ec614bbd027020b8917d224bb7f97b5a1
make accessor be called
grappa/resolver.py
grappa/resolver.py
# -*- coding: utf-8 -*-
import functools

from .empty import empty
from .assertion import AssertionProxy
from .operator import OperatorTypes


class OperatorResolver(object):
    """
    Resolves and triggers an operator based on its name identifier.

    This class is highly-coupled to `grappa.Test` and consumes
    `grappa.Engine` and `grappa.Context` in order to trigger operator
    resolution logic.
    """

    def __init__(self, test):
        self.test = test
        self.ctx = test._ctx
        self.engine = test._engine

    def run_attribute(self, operator):
        operator.run()

    def run_accessor(self, operator):
        # Register assertion function
        def assertion(subject):
            self.ctx.subject = subject
            return operator.run(subject)

        # Add assertion function
        self.engine.add_assertion(assertion)

        # Self-trigger tests if running as global
        if self.ctx.chained:
            self.test._trigger()

        return self.test

    def run_matcher(self, operator):
        # Process assert operators
        def wrapper(*expected, **kw):
            # Register keyword call
            self.engine.add_keyword({'call': expected, 'operator': operator})

            # Retrieve optional custom assertion message
            if 'msg' in kw:
                # Set user-defined message
                self.ctx.message = kw.pop('msg')

            def assertion(subject):
                # Register call subjects
                operator.ctx.subject = subject
                operator.ctx.expected = expected
                return operator.run(subject, *expected, **kw)

            # Register assertion function
            self.test._engine.add_assertion(assertion)

            # Trigger tests on function call if running as chained call
            if self.ctx.chained or self.ctx.subject is not empty:
                return self.test._trigger()

            return self.test

        return AssertionProxy(self, operator, wrapper)

    def attribute_error_message(self, name):
        def reduce_operators(buf, operator):
            columns = 4
            name, op = operator
            data = buf[op.kind]
            if len(data[-1]) < columns:
                data[-1].append(name)
            else:
                buf[op.kind].append([name])
            return buf

        def calculate_space(name):
            max_space = 20
            spaces = max_space - len(name)
            return ''.join([' ' for _ in range(spaces if spaces else 0)])

        def spacer(names):
            return ''.join([name + calculate_space(name) for name in names])

        def join(names):
            return '\n '.join([spacer(line) for line in names])

        # Reduce operators names and select them per type
        operators = functools.reduce(
            reduce_operators,
            self.engine.operators.items(),
            {
                OperatorTypes.ATTRIBUTE: [[]],
                OperatorTypes.ACCESSOR: [[]],
                OperatorTypes.MATCHER: [[]]
            })

        # Compose available operators message by type
        values = [' {}S:\n {}'.format(kind.upper(), join(names))
                  for kind, names in operators.items()]

        # Compose and return assertion message error
        return ('"{}" has no assertion operator called "{}"\n\n'
                ' However, you can use one of the following operators:\n\n'
                '{}\n').format(self.ctx.style, name, '\n\n'.join(values))

    def resolve(self, name):
        # Check if should stop the call chain
        if self.ctx.stop_chain:
            raise RuntimeError(
                'grappa: test operator "{}" does not allow '
                'chained calls.'.format(self.ctx.stop_chain.operator_name))

        # Find an assertion operator by name
        operator = self.engine.find_operator(name)

        # Raise attribute error
        if not operator:
            raise AttributeError(self.attribute_error_message(name))

        # Register attribute access
        self.engine.add_keyword(name)

        # Create operator instance with current context
        operator = operator(context=self.ctx, operator_name=name)

        # Check chainable operator logic is enabled
        if getattr(operator, 'chainable', True) is False:
            self.ctx.stop_chain = operator

        # Reset context sequence
        if self.ctx.reset:
            self.engine.reset_keywords()
            self.ctx.reset = False
            # self.ctx.reverse = True

        # Dynamically retrieve operator
        method_name = 'run_{}'.format(operator.kind)
        run_operator = getattr(self, method_name, None)

        # If operator kind is not supported, raise an exception
        if not run_operator:
            raise ValueError('operator "{}" has not a valid kind "{}"'.format(
                operator.__class__.__name__, operator.kind
            ))

        # Register operator assertion for lazy execution
        return run_operator(operator) or self.test
Python
0
@@ -703,28 +703,32 @@ -self +operator .ctx.subject @@ -818,32 +818,38 @@ on%0A self. +test._ engine.add_asser @@ -942,16 +942,49 @@ .chained + or self.ctx.subject is not empty :%0A
1212966326eb096e10b52277b0c6b53126262e3b
Improve messages in example
examples/basic_usage.py
examples/basic_usage.py
import os

from twilio.twiml import Response
from twilio.rest import Client

ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')


def example():
    """
    Some example usage of different twilio resources.
    """
    client = Client(ACCOUNT_SID, AUTH_TOKEN)

    print('Get all the messages...')
    all_messages = client.messages.list()
    print('There are {} messages in your account.'.format(len(all_messages)))

    print('Get only last 10 messages...')
    some_messages = client.messages.list(limit=10)

    print('Get messages in smaller pages...')
    some_messages = client.messages.list(page_size=10)

    print('Sending a message...')
    new_message = client.messages.create(to='XXXX', from_='YYYY',
                                         body='Twilio rocks!')

    print('Making a call...')
    new_call = client.calls.create(to='XXXX', from_='YYYY', method='GET')

    print('Serving TwiML')
    twiml_response = Response()
    twiml_response.say('Hello!')
    twiml_response.hangup()
    twiml_xml = twiml_response.toxml()
    print('Generated twiml: {}'.format(twiml_xml))

if __name__ == '__main__':
    example()
Python
0.000028
@@ -312,23 +312,18 @@ -print(' +# Get all the @@ -322,20 +322,16 @@ all -the messages ...' @@ -322,29 +322,24 @@ all messages -...') %0A all_mes @@ -448,31 +448,26 @@ ges)))%0A%0A -print(' +# Get only las @@ -478,26 +478,24 @@ messages... -') %0A some_me @@ -530,25 +530,24 @@ t(limit=10)%0A -%0A print('G @@ -545,16 +545,117 @@ print(' +Here are the last 10 messages in your account:')%0A for m in some_messages:%0A print(m)%0A%0A # Get mess @@ -682,19 +682,16 @@ s... -') %0A -some +all _mes @@ -732,16 +732,94 @@ size=10) +%0A print('There are %7B%7D messages in your account.'.format(len(all_messages))) %0A%0A pr
9b5dc2f9998d374263b2e1d35d6b5cfc7a831b1e
undo setuid on return
univention-openvpn/openvpn-master2.py
univention-openvpn/openvpn-master2.py
#
# Univention OpenVPN integration -- openvpn-master2.py
#

__package__ = ''  # workaround for PEP 366

import listener
from univention import debug as ud
import univention.uldap as ul

from datetime import date
from M2Crypto import RSA, BIO
from base64 import b64decode

name = 'openvpn-master2'
description = 'create user openvpn package with updated config'
filter = '(&(objectClass=univentionOpenvpn)(univentionOpenvpnActive=1))'
attributes = ['univentionOpenvpnPort', 'univentionOpenvpnAddress']
modrdn = 1

pubbio = BIO.MemoryBuffer('''
-----BEGIN PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAN0VVx22Oou8UTDsrug/UnZLiX2UcXeE
GvQ6kWcXBhqvSUl0cVavYL5Su45RXz7CeoImotwUzrVB8JnsIcrPYw8CAwEAAQ==
-----END PUBLIC KEY-----
''')
pub = RSA.load_pub_key_bio(pubbio)
pbs = pub.__len__() / 8


def license(key):
    try:
        enc = b64decode(key)
        raw = ''
        while len(enc) > pbs:
            d, key = (enc[:pbs], enc[pbs:])
            raw = raw + pub.public_decrypt(d, 1)
        if len(enc) != pbs:
            return None  # invalid license
        raw = raw + pub.public_decrypt(enc, 1)
        #
        items = raw.rstrip().split('\n')
        if not items:
            return None  # invalid license
        vdate = int(items.pop(0))
        if date.today().toordinal() > vdate:
            return None  # expired
        l = {'valid': True}  # at least one feature returned
        while items:
            kv = items.pop(0).split('=', 1)
            kv.append(True)
            l[kv[0]] = kv[1]
        return l  # valid license
    except:
        return None  # invalid license


def maxvpnusers(key):
    mnlu = 5
    try:
        return max(int(license(key)['u']), mnlu)
    except:
        return mnlu  # invalid license


# called to create (update) bundle for user when openvpn is activated
def handler(dn, new, old, cmd):
    ud.debug(ud.LISTENER, ud.INFO, 'openvpn-master2.handler() invoked')

    if cmd == 'n':
        return

    name = new.get('cn', [None])[0]
    port = new.get('univentionOpenvpnPort', [None])[0]
    addr = new.get('univentionOpenvpnAddress', [None])[0]

    if not name or not port or not addr:
        return

    listener.setuid(0)
    lo = ul.getAdminConnection()

    vpnusers = lo.search('(univentionOpenvpnAccount=1)')
    vpnuc = len(vpnusers)
    licuc = 5
    maxu = maxvpnusers(new.get('univentionOpenvpnLicense', [None])[0])
    ud.debug(ud.LISTENER, ud.INFO, 'openvpn/handler: found %u active openvpn users (%u allowed)' % (vpnuc, maxu))
    if vpnuc > maxu:
        return  # do nothing

    for user in vpnusers:
        uid = user[1].get('uid', [None])[0]
        home = user[1].get('homeDirectory', [None])[0]
        ud.debug(ud.LISTENER, ud.INFO, 'openvpn/handler: create new certificate for %s in %s' % (uid, home))
        if uid and home:
            # update bundle for this openvpn server with new config
            try:
                listener.run('/usr/lib/openvpn-int/create-bundle',
                             ['create-bundle', 'no', uid, home, name, addr, port], uid=0)
            finally:
                listener.unsetuid()

    listener.unsetuid()

### end ###
Python
0.000002
@@ -2430,16 +2430,44 @@ %3E maxu:%0A + listener.unsetuid()%0A
3c5b44608529b70267b79266945020bc4c45ccb2
fix lack of dest folder
spidergram.py
spidergram.py
from bs4 import BeautifulSoup
import requests
import re
import os
import codecs
import unidecode
import arrow

# disable warning about HTTPS
try:
    requests.packages.urllib3.disable_warnings()
except:
    pass


class instaLogger:
    def __init__(self, logfile):
        self.logfile = logfile

    def logEntry(self, entry, level):
        with codecs.open(self.logfile, mode='a', encoding='utf-8') as log:
            log.write(entry + '\n')
        if 'progress' in level:
            print unidecode.unidecode(entry)


class instagram:
    def __init__(self, logobj):
        self.logger = logobj
        self.dest = os.path.join(os.getcwdu(), 'images')
        if not os.path.exists(self.dest):
            os.makedirs(self.dest)
        self.results = None
        self.resetResults()
        self.baseUrl = None

    def resetResults(self):
        self.results = dict(
            count=0,
            skipped=0,
            failed=0,
            succeeded=0,
            nonexistent=0,
        )

    def setBaseUrl(self, url):
        # storing base URL simplifies recursion
        self.baseUrl = url

    def downloadImage(self, imgurl, dest=None):
        # download an image, avoiding duplication.
        imgname = imgurl.split('/')[-1]
        if not dest:
            rdest = self.dest
        else:
            rdest = os.path.join(self.dest, dest)
        imgwrite = os.path.join(rdest, imgname)
        try:
            if not os.path.exists(imgwrite):
                r = requests.get(imgurl)
                with open(imgwrite, "wb") as code:
                    code.write(r.content)
                self.logger.logEntry(('downloaded ' + imgname), 'progress')
                self.results['succeeded'] += 1
                return True
            else:
                self.logger.logEntry(('already have ' + imgname), 'verbose')
                self.results['skipped'] += 1
                return True
        except:
            self.logger.logEntry('failed to get: {0} from {1}'.format(
                imgurl, imgname), 'verbose')
            self.results['failed'] += 1
            return None

    def findWindowSharedData(self, pageurl):
        page = requests.get(pageurl).content
        soup = BeautifulSoup(page, "html.parser")
        scripts = soup.find_all('script')
        for each in scripts:
            if each.string:
                if each.string.startswith('window._sharedData'):
                    return each.string.split(' = ')[-1]

    def getLinksForGalleryPage(self, url):
        """
        Recursive function to traverse the script payload that apparently is
        used to load Instagram pages completely on the fly. Pulls each individual
        "page" - which is apparently 49 images by the user, delineated by the
        "start_cursor" and "end_cursor" in the payload - so that it can be parsed
        for images, and then uses the ending cursor to generate the link to the
        next "page".
        """
        username = baseurl.split('/')[-2]
        print "Downloaded {1} images. Scanning {0}...".format(
            url, self.results['succeeded'])
        payloadRaw = self.findWindowSharedData(url)
        payloadRaw = re.sub('/', '', payloadRaw)
        postIds = re.findall(
            '(?<=\{"code":").*?"', payloadRaw)
        for code in postIds:
            hrlink = self.getHighResLink(code[:-1])
            self.downloadImage(hrlink, dest=username)
        hasNextId = re.search(
            '(?<=has_next_page":)[truefals]*', payloadRaw)
        if hasNextId.group(0) == "true":
            nextId = re.search(
                '(?<=end_cursor":")[0-9]*', payloadRaw)
            nextUrl = self.baseUrl + "?max_id=" + nextId.group(0)
            self.getLinksForGalleryPage(nextUrl)
        else:
            return

    def getHighResLink(self, code):
        pageurl = 'https://www.instagram.com/p/{0}/?hl=en'.format(code)
        payloadRaw = self.findWindowSharedData(pageurl)
        hrlink = re.findall(
            '(?<="display_src":").*?\?', payloadRaw)[0]
        hrlink = hrlink.replace('\\', '')[:-1]
        return hrlink


if __name__ == "__main__":
    dt = arrow.utcnow().to('US/Pacific').format('YYYY-MM-DD')
    logfile = os.path.join('logs', str('spidergram ' + dt + '.log'))
    logger = instaLogger(logfile)
    site = instagram(logger)
    baseurl = "https://www.instagram.com/13thwitness/"
    site.setBaseUrl(baseurl)
    site.getLinksForGalleryPage(baseurl)
Python
0.000007
@@ -102,16 +102,33 @@ rt arrow +%0Aimport traceback %0A%0A# disa @@ -1424,24 +1424,93 @@ t, imgname)%0A + if not os.path.exists(rdest):%0A os.makedirs(rdest)%0A try: @@ -2057,24 +2057,87 @@ except:%0A + exc = traceback.format_exc()%0A print exc%0A @@ -2157,16 +2157,33 @@ ogEntry( +%0A 'failed @@ -2202,16 +2202,34 @@ from %7B1%7D + - Traceback:%5Cn%7B2%7D '.format @@ -2257,24 +2257,29 @@ url, imgname +, exc ), 'verbose' @@ -2834,19 +2834,16 @@ parently - is %0A @@ -2843,16 +2843,19 @@ +is used to @@ -2908,16 +2908,24 @@ lls each +%0A individ @@ -2927,24 +2927,16 @@ dividual -%0A %22page%22 @@ -2961,10 +2961,10 @@ tly +2 4 -9 ima @@ -2979,16 +2979,24 @@ he user, +%0A delinea @@ -3005,24 +3005,16 @@ d by the -%0A %22start_ @@ -3054,16 +3054,24 @@ ayload - +%0A so that @@ -3087,24 +3087,16 @@ e parsed -%0A for ima @@ -3131,16 +3131,24 @@ g cursor +%0A to gene @@ -3167,24 +3167,16 @@ k to the -%0A next %22p @@ -4696,24 +4696,24 @@ rl(baseurl)%0A + site.get @@ -4741,9 +4741,8 @@ aseurl)%0A -%0A
5ab3f3d06216381b697781d80069354745110de1
make yaml put out unicode
plexlibrary/utils.py
plexlibrary/utils.py
# -*- coding: utf-8 -*-
import yaml


class Colors(object):
    RED = "\033[1;31m"
    BLUE = "\033[1;34m"
    CYAN = "\033[1;36m"
    GREEN = "\033[0;32m"
    RESET = "\033[0;0m"
    BOLD = "\033[;1m"
    REVERSE = "\033[;7m"


class YAMLBase(object):
    def __init__(self, filename):
        with open(filename, 'r') as f:
            try:
                self.data = yaml.safe_load(f)
            except yaml.YAMLError as e:
                raise e

    def __getitem__(self, k):
        return self.data[k]

    def __iter__(self, k):
        return self.data.itervalues()
Python
0.000063
@@ -29,16 +29,52 @@ rt yaml%0A +from yaml import Loader, SafeLoader%0A %0A%0Aclass @@ -317,16 +317,316 @@ ename):%0A + # Make sure pyyaml always returns unicode%0A def construct_yaml_str(self, node):%0A return self.construct_scalar(node)%0A Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)%0A SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)%0A%0A
819f1957f2575f24d9d9a99cb5999415f7e1247e
Add get_results_path() and get_child_experiments() to the experiments module.
experiments/__init__.py
experiments/__init__.py
import os
import retrying
import sys
import time

from .. import BaseAction, ActionStatus
from ..helpers import get_attribute, get_multiprint, save_pickle_gz


class BaseExperiment(BaseAction):
    pass


class MetaExperiment(BaseExperiment):
    pass


class Experiment(BaseExperiment):
    DEFAULT_RESULTS_ROOT = os.path.join('output', 'experiment')
    DEFAULT_TENSORBOARD_ROOT = os.path.join('output', 'experiment-tb')

    def _setup(self, config, status):
        config.require('name')
        config.require('model.name', 'model')
        config.define('iteration', default=None)
        config.model.name = os.path.basename(config.model.name)

        self._init_results_directory(config)
        self.model = self.create_model(config)

    def _init_results_directory(self, config):
        config.define('forced', 'force_replace', type=int, default=0)
        config.define('path.result.main.base', 'path.result.base',
                      default=self.DEFAULT_RESULTS_ROOT)
        config.define('path.result.tensorboard.base', 'path.result.base.tensorboard',
                      default=self.DEFAULT_TENSORBOARD_ROOT)

        if config.iteration is not None:
            config.name = '+' + config.name

        self.relative_results_path = os.path.join(config.model.name, config.name)
        if config.iteration is not None:
            self.relative_results_path = os.path.join(self.relative_results_path,
                                                      config.iteration)
        self.relative_tensorboard_path = '{}@{}'.format(self.relative_results_path,
                                                        int(time.time()))

        config.define('path.result.main.relative', 'path.result.relative',
                      default=self.relative_results_path)
        config.define('path.result.tensorboard.relative',
                      'path.result.relative.tensorboard',
                      default=self.relative_tensorboard_path)

        self.results_path = os.path.join(config.path.result.main.base,
                                         config.path.result.main.relative)
        self.tensorboard_path = os.path.join(config.path.result.tensorboard.base,
                                             config.path.result.tensorboard.relative)

        if os.path.exists(self.results_path):
            if not config.forced:
                raise RuntimeError('Experiment in \'{}\' already exists'.format(
                    self.results_path))
        else:
            os.makedirs(self.results_path)
        os.makedirs(self.tensorboard_path)

        self.log = get_multiprint(self.get_results_path('__log.txt'))
        self.log('Results path: ' + self.results_path)
        self.log('Tensorboard log path: ' + self.tensorboard_path)

    def _teardown(self, config, status, success):
        if success:
            self.save_model()
            self.save_metadata()
            self.mark_as_completed()

    def create_model(self, config, module=None):
        module = module or config.model.name
        return get_attribute(module, 'create',
                             ['models','mlxm.models','mlxm.keras.models'])(config)

    @retrying.retry
    def save_metadata(self):
        self.log('Saving metadata... ', end='')
        config_dict = self.config.to_dict()
        save_pickle_gz(sys.argv, self.get_results_path('__args'))
        save_pickle_gz(config_dict, self.get_results_path('__env'))
        with open(self.get_results_path('__args.txt'), 'w') as f:
            f.write(' '.join(sys.argv))
        with open(self.get_results_path('__env.txt'), 'w') as f:
            longest_key_length = max(len(key) for key in config_dict.keys())
            f.write('\n'.join(['{: <{width}} = {}'.format(key.upper(),
                                                          config_dict[key],
                                                          width=longest_key_length)
                               for key in sorted(config_dict.keys())]))
        with open(self.get_results_path('__model.yaml'), 'w') as f:
            f.write(self.model.to_yaml())
        self.log('Done.')

    @retrying.retry
    def save_model(self, name='model-final'):
        self.log('Saving model "{}" ... '.format(name), end='')
        self.model.save(self.get_results_path(name + '.h5'))
        self.log('Done.')

    @retrying.retry
    def save_history(self, history, name='history'):
        self.log('Saving history "{}" ... '.format(name), end='')
        with open(self.get_results_path(name + '.txt'), 'w') as f:
            f.write('step')
            for model_name, metrics_names in history.metrics_names.items():
                f.write('\t' + '\t'.join(model_name + '/' + metric
                                         for metric in metrics_names))
            f.write('\n')
            for epoch, losses in enumerate(zip(*history.metrics.values())):
                f.write(str(epoch))
                for loss in losses:
                    if isinstance(loss, list):
                        f.write('\t' + '\t'.join('{:0.4f}'.format(float(L))
                                                 for L in loss))
                    else:
                        f.write('\t{:0.4f}'.format(float(loss)))
                f.write('\n')
        save_pickle_gz(history.metrics, self.get_results_path(name))
        save_pickle_gz(history.metrics_names,
                       self.get_results_path(name + '-metrics'))
        self.log('Done.')

    @retrying.retry
    def mark_as_completed(self, message=''):
        with open(self.get_results_path('__completed.txt'), 'w') as f:
            f.write(message)

    def get_results_path(self, filename=None):
        return os.path.join(self.results_path, filename) if filename is not None else self.results_path
Python
0
@@ -71,22 +71,8 @@ tion -, ActionStatus %0Afro @@ -137,16 +137,680 @@ kle_gz%0A%0A +def get_results_path(config, model_experiment_name, filename=''):%0A config.define('path.result.main.base', 'path.result.base', default=Experiment.DEFAULT_RESULTS_ROOT)%0A path = os.path.join(config('path.result.main.base'), model_experiment_name)%0A if filename:%0A path = os.path.join(path, filename)%0A return path%0A%0Adef get_child_experiments(base_path, max_index=1000):%0A if os.path.basename(base_path).startswith('+'):%0A for i in range(1, max_index+1):%0A path = os.path.join(base_path, str(i))%0A if os.path.exists(path):%0A yield path%0A else:%0A break%0A else:%0A yield base_path%0A%0A class Ba
16bec17e7337fd1cbaef12934cfeae05a563719f
fix var scoping bug
inbox/util/url.py
inbox/util/url.py
from dns.resolver import Resolver
from dns.resolver import NoNameservers, NXDOMAIN, Timeout, NoAnswer
from urllib import urlencode
from inbox.log import get_logger
import re
log = get_logger('inbox.util.url')

from inbox.providers import providers

# http://www.regular-expressions.info/email.html
EMAIL_REGEX = re.compile(r'[A-Z0-9._%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,4}',
                         re.IGNORECASE)

# Use Google's Public DNS server (8.8.8.8)
dns_resolver = Resolver()
dns_resolver.nameservers = ['8.8.8.8']


class InvalidEmailAddressError(Exception):
    pass


def provider_from_address(email_address):
    if not EMAIL_REGEX.match(email_address):
        raise InvalidEmailAddressError('Invalid email address')

    domain = email_address.split('@')[1].lower()

    mx_records = []
    try:
        mx_records = dns_resolver.query(domain, 'MX')
    except NoNameservers:
        log.error("NoMXservers error", domain=domain)
    except NXDOMAIN:
        log.error("No such domain", domain=domain)
    except Timeout:
        log.error("Timed out while resolving", domain=domain)
    except NoAnswer:
        log.error("Provider didn't answer", domain=domain)

    ns_records = []
    try:
        ns_records = dns_resolver.query(domain, 'NS')
    except NoNameservers:
        log.error("NoNameservers error", domain=domain)
    except NXDOMAIN:
        log.error("No such domain", domain=domain)
    except Timeout:
        log.error("Timed out while resolving", domain=domain)
    except NoAnswer:
        log.error("Provider didn't answer", domain=domain)

    for (p_name, p) in providers.iteritems():
        mx_servers = p.get('mx_servers', [])
        ns_servers = p.get('ns_servers', [])
        domains = p.get('domains', [])
        if domain in domains:
            return p_name

        valid = True
        for rdata in mx_records:
            domain = str(rdata.exchange).lower()
            # Depending on how the MX server is configured, domain may
            # refer to a relative name or to an absolute one.
            # FIXME @karim: maybe resolve the server instead.
            if domain[-1] == '.':
                domain = domain[:-1]
            if domain not in mx_servers:
                valid = False
                break

        if valid:
            return p_name

        for rdata in ns_records:
            if str(rdata).lower() not in ns_servers:
                valid = False
                break

        if valid:
            return p_name

    return 'unknown'


# From tornado.httputil
def url_concat(url, args, fragments=None):
    """Concatenate url and argument dictionary regardless of whether
    url has existing query parameters.

    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    """
    if not args and not fragments:
        return url

    # Strip off hashes
    while url[-1] == '#':
        url = url[:-1]

    fragment_tail = ''
    if fragments:
        fragment_tail = '#' + urlencode(fragments)

    args_tail = ''
    if args:
        if url[-1] not in ('?', '&'):
            args_tail += '&' if ('?' in url) else '?'
        args_tail += urlencode(args)

    return url + args_tail + fragment_tail
Python
0
@@ -765,25 +765,24 @@ %5B1%5D.lower()%0A -%0A mx_recor @@ -1857,32 +1857,35 @@ ds:%0A +mx_ domain = str(rda @@ -2112,24 +2112,27 @@ if +mx_ domain%5B-1%5D = @@ -2150,24 +2150,27 @@ +mx_ domain = dom @@ -2166,16 +2166,19 @@ omain = +mx_ domain%5B: @@ -2193,24 +2193,27 @@ if +mx_ domain not i
201a9d75e9c4a2c84372fe58a674977f2435130f
update fastapi example.
examples/fastapi/app.py
examples/fastapi/app.py
from fastapi import FastAPI, HTTPException, APIRouter
from honeybadger import honeybadger, contrib
from honeybadger.contrib import asgi
from honeybadger.contrib import fastapi
import pydantic

honeybadger.configure(api_key='c10787cf')

app = FastAPI()
# contrib.FastAPIHoneybadger(app)
app.add_middleware(asgi.ASGIHoneybadger, params_filters=["user-agent", "host", "url", "query_string", "client"])


@app.get("/raise_some_error")
def raise_some_error(a: str):
    """Raises an error."""
    raise Exception(f"SomeError Occurred (a = {a})")


class DivideRequest(pydantic.BaseModel):
    a: int
    b: int = 0


@app.post("/divide")
def divide(req: DivideRequest):
    """Divides `a` by `b`."""
    return req.a / req.b


@app.post("/raise_404")
def raise_404(req: DivideRequest, a: bool = True):
    raise HTTPException(status_code=404, detail="Raising on purpose.")


some_router = APIRouter()


@some_router.get("/some_router_endpoint")
def some_router_endpoint():
    raise Exception("Exception Raised by some router endpoint.")


app.include_router(some_router)
Python
0
@@ -96,85 +96,8 @@ rib%0A -from honeybadger.contrib import asgi%0Afrom honeybadger.contrib import fastapi%0A impo @@ -170,42 +170,38 @@ API( -)%0A# contrib.FastAPIHoneybadger(app +title=%22Honeybadger - FastAPI.%22 )%0Aap @@ -221,12 +221,15 @@ are( -asgi +contrib .ASG @@ -262,53 +262,8 @@ rs=%5B -%22user-agent%22, %22host%22, %22url%22, %22query_string%22, %22cli @@ -270,17 +270,16 @@ ent%22%5D)%0A%0A -%0A @app.get @@ -298,16 +298,33 @@ e_error%22 +, tags=%5B%22Notify%22%5D )%0Adef ra @@ -344,16 +344,24 @@ r(a: str + = %22foo%22 ):%0A %22 @@ -523,16 +523,53 @@ /divide%22 +, response_model=int, tags=%5B%22Notify%22%5D )%0Adef di @@ -672,64 +672,192 @@ ise_ -404%22)%0Adef raise_404(req: DivideRequest, a: bool = True): +status_code%22, tags=%5B%22Don't Notify%22%5D)%0Adef raise_status_code(status_code: int = 404, detail: str = %22Forced 404.%22):%0A %22%22%22This exception is raised on purpose, so will not be notified.%22%22%22 %0A @@ -905,31 +905,15 @@ ail= -%22Raising on purpose.%22)%0A +detail) %0A%0Aso @@ -959,33 +959,33 @@ et(%22/some_router -_ +/ endpoint%22)%0Adef s @@ -977,16 +977,33 @@ ndpoint%22 +, tags=%5B%22Notify%22%5D )%0Adef so @@ -1024,16 +1024,62 @@ oint():%0A + %22%22%22Try raising an error from a router.%22%22%22%0A rais @@ -1167,8 +1167,9 @@ _router) +%0A
ba084db6c16e5dee9e9ff06a3bee02f4dbfb5c82
Add environment variable to control use of UNIX socket proxying
powerstrip.tac
powerstrip.tac
import os

from twisted.application import service, internet
#from twisted.protocols.policies import TrafficLoggingFactory
from urlparse import urlparse

from powerstrip.powerstrip import ServerProtocolFactory

application = service.Application("Powerstrip")

DOCKER_HOST = os.environ.get('DOCKER_HOST')
if DOCKER_HOST is None:
    # Default to assuming we've got a Docker socket bind-mounted into a
    # container we're running in.
    DOCKER_HOST = "unix:///host-var-run/docker.real.sock"
if "://" not in DOCKER_HOST:
    DOCKER_HOST = "tcp://" + DOCKER_HOST
if DOCKER_HOST.startswith("tcp://"):
    parsed = urlparse(DOCKER_HOST)
    dockerAPI = ServerProtocolFactory(dockerAddr=parsed.hostname,
                                      dockerPort=parsed.port)
elif DOCKER_HOST.startswith("unix://"):
    socketPath = DOCKER_HOST[len("unix://"):]
    dockerAPI = ServerProtocolFactory(dockerSocket=socketPath)
#logged = TrafficLoggingFactory(dockerAPI, "api-")

# Refuse to listen on a TCP port, until
# https://github.com/ClusterHQ/powerstrip/issues/56 is resolved.
# TODO: maybe allow to specify a numeric Docker group (gid) as environment
# variable, and also (optionally) the name of the socket file it creates...
dockerServer = internet.UNIXServer("/host-var-run/docker.sock", dockerAPI, mode=0660)
dockerServer.setServiceParent(application)
Python
0
@@ -296,16 +296,83 @@ _HOST')%0A +ENABLE_UNIX_SOCKET = os.environ.get('POWERSTRIP_UNIX_SOCKET', %22%22)%0A%0A if DOCKE @@ -493,16 +493,124 @@ ing in.%0A + if %22YES%22 in ENABLE_UNIX_SOCKET:%0A DOCKER_HOST = %22unix:///host-var-run/docker.real.sock%22%0A else:%0A DOCK @@ -644,29 +644,24 @@ -run/docker. -real. sock%22%0Aif %22:/ @@ -1352,16 +1352,50 @@ ates...%0A +if %22YES%22 in ENABLE_UNIX_SOCKET:%0A dockerSe @@ -1472,16 +1472,18 @@ e=0660)%0A + dockerSe
33e693337ab646eaccb724b9c4b3eb3352c6e412
fix pagination
mapentity/pagination.py
mapentity/pagination.py
from rest_framework_datatables.pagination import DatatablesPageNumberPagination


class MapentityDatatablePagination(DatatablesPageNumberPagination):
    """ Custom datatable pagination for Mapentity list views. """

    def get_count_and_total_count(self, queryset, view):
        """ Handle count for all filters """
        count, total_count = super().get_count_and_total_count(queryset, view)
        count = queryset.count()  # replace count by real count - not only drf-datatables count
        return count, total_count
Python
0.998471
@@ -209,20 +209,30 @@ ws. %22%22%22%0A + pass %0A + # def get @@ -276,24 +276,26 @@ , view):%0A + # %22%22%22 Han @@ -323,24 +323,26 @@ ters %22%22%22%0A + # count, @@ -404,24 +404,26 @@ t, view)%0A + # count = @@ -506,16 +506,18 @@ ount%0A + # ret
15104ee34d78002231d51a465d02f8807da3cfc0
remove unused import
examples/quiver_demo.py
examples/quiver_demo.py
from matplotlib.toolkits.basemap import Basemap, interp
from pylab import show, title, arange, meshgrid, cm, figure, sqrt, \
     colorbar, axes, gca, reshape, array, Float32, quiverkey

# read in data.
file = open('fcover.dat','r')
ul=[];vl=[];pl=[]
nlons=73; nlats=73
dellat = 2.5; dellon = 5.
for line in file.readlines():
    l = line.replace('\n','').split()
    ul.append(float(l[0]))
    vl.append(float(l[1]))
    pl.append(float(l[2]))
u = reshape(array(ul,Float32),(nlats,nlons))
v = reshape(array(vl,Float32),(nlats,nlons))
p = reshape(array(pl,Float32),(nlats,nlons))
lats1 = -90.+dellat*arange(nlats)
lons1 = -180.+dellon*arange(nlons)
lons, lats = meshgrid(lons1, lats1)

# plot vectors in geographical (lat/lon) coordinates.

# north polar projection.
m = Basemap(lon_0=-135,boundinglat=25,
            resolution='c',area_thresh=10000.,projection='npstere')
# create a figure, add an axes.
fig=figure(figsize=(8,8))
ax = fig.add_axes([0.1,0.1,0.7,0.7])
# rotate wind vectors to map projection coordinates.
# (also compute native map projections coordinates of lat/lon grid)
# only do Northern Hemisphere.
urot,vrot,x,y = m.rotate_vector(u[36:,:],v[36:,:],lons[36:,:],lats[36:,:],returnxy=True)
# plot filled contours over map.
cs = m.contourf(x,y,p[36:,:],15,cmap=cm.jet)
# plot wind vectors over map.
Q = m.quiver(x,y,urot,vrot) #or specify, e.g., width=0.003, scale=400)
qk = quiverkey(Q, 0.95, 1.05, 25, '25 m/s', labelpos='W')
cax = axes([0.875, 0.1, 0.05, 0.7]) # setup colorbar axes.
colorbar(cax=cax) # draw colorbar
axes(ax)  # make the original axes current again
m.drawcoastlines()
m.drawcountries()
# draw parallels
delat = 20.
circles = arange(0.,90.+delat,delat).tolist()+\
          arange(-delat,-90.-delat,-delat).tolist()
m.drawparallels(circles,labels=[1,1,1,1])
# draw meridians
delon = 45.
meridians = arange(-180,180,delon)
m.drawmeridians(meridians,labels=[1,1,1,1])
title('Surface Winds Winds and Pressure (lat-lon grid)',y=1.075)

# plot vectors in map projection coordinates.

# north polar projection.
m = Basemap(lon_0=-135,boundinglat=25,
            resolution='c',area_thresh=10000.,projection='npstere')
# transform from spherical to map projection coordinates (rotation
# and interpolation).
nxv = 41; nyv = 41
nxp = 101; nyp = 101
spd = sqrt(u**2+v**2)
udat, vdat, xv, yv = m.transform_vector(u,v,lons1,lats1,nxv,nyv,returnxy=True)
pdat, xp, yp = m.transform_scalar(p,lons1,lats1,nxp,nyp,returnxy=True)
# create a figure, add an axes.
fig=figure(figsize=(8,8))
ax = fig.add_axes([0.1,0.1,0.7,0.7])
# plot image over map
im = m.imshow(pdat,cm.jet)
# plot wind vectors over map.
Q = m.quiver(xv,yv,udat,vdat) #or specify, e.g., width=0.003, scale=400)
qk = quiverkey(Q, 0.95, 1.05, 25, '25 m/s', labelpos='W')
cax = axes([0.875, 0.1, 0.05, 0.7]) # setup colorbar axes.
colorbar(cax=cax) # draw colorbar
axes(ax)  # make the original axes current again
m.drawcoastlines()
m.drawcountries()
# draw parallels
delat = 20.
circles = arange(0.,90.+delat,delat).tolist()+\
          arange(-delat,-90.-delat,-delat).tolist()
m.drawparallels(circles,labels=[1,1,1,1])
# draw meridians
delon = 45.
meridians = arange(-180,180,delon)
m.drawmeridians(meridians,labels=[1,1,1,1])
title('Surface Winds Winds and Pressure (projection grid)',y=1.075)
show()
Python
0.000003
@@ -44,16 +44,8 @@ emap -, interp %0Afro
c7679393ae11766cc9da4474f4db1d0dbe50ac91
Bump to 0.11.0
watchman/__init__.py
watchman/__init__.py
__version__ = '0.10.1'
Python
0.000042
@@ -11,13 +11,13 @@ _ = '0.1 -0.1 +1.0 '%0A
815fecf36f9c0114a9aa8594b58226ead223b313
fix type bug
app/app.py
app/app.py
"""Do work""" import argparse import logging import os import sys from cameracontroller.cameracontroller import CameraController from storage.cloudstorage import CloudStorage logger = logging.getLogger('pypic') log_dir = os.path.expanduser('~/log') if not os.path.exists(log_dir): os.makedirs(log_dir) logging.basicConfig( filename=os.path.join(log_dir, 'pypiclog'), format='%(asctime)s :: %(levelname)s :: %(message)s', level=logging.DEBUG ) def exception_handler(exception_type, exception, traceback): logger.error(str(exception)) sys.excepthook = exception_handler def main(): """Main script execution""" parser = argparse.ArgumentParser() parser.add_argument( '-c', '--continuous', action='store_true', help='If set, run the video feed continuously' ) parser.add_argument( '-d', '--duration', default=10, help='Duration (in seconds) to run the video loop' ) args = parser.parse_args() camera_controller = CameraController( os.path.expanduser('~/pypic_output'), CloudStorage( os.environ.get('AZSTORAGE_ACCOUNT_NAME'), os.environ.get('AZSTORAGE_ACCOUNT_KEY') ) ) camera_controller.record_video( continuous=args.continuous, duration=args.duration ) if __name__ == '__main__': main()
Python
0.000001
@@ -889,16 +889,36 @@ ult=10,%0A + type=float,%0A
9b9631aade65c9be7fa632d5ae00060b2609224f
Load result JSON.
inthe_am/taskmanager/features/environment.py
inthe_am/taskmanager/features/environment.py
from collections import Counter
import json
import os
import string
from urlparse import urljoin

from django.conf import settings
from django.contrib.auth.models import User
from splinter.browser import Browser

TEST_COUNTERS = {
    'following': Counter(),
    'before': Counter(),
    'demand': Counter()
}
ABSOLUTE_COUNTER = 0


def sanitize_name(name):
    acceptable_letters = []
    for char in name:
        if char in string.letters:
            acceptable_letters.append(char)
        if char == ' ':
            acceptable_letters.append('_')
    return ''.join(acceptable_letters)


def save_page_details(context, step=None, prefix='demand'):
    global TEST_COUNTERS, ABSOLUTE_COUNTER
    ABSOLUTE_COUNTER += 1
    this_absolute_counter = ABSOLUTE_COUNTER

    scenario_name = sanitize_name(context.scenario.name)
    if step:
        step_name = sanitize_name(step.name)
    else:
        step_name = ''
    status = 'FAIL' if context.failed else 'OK'

    TEST_COUNTERS[prefix][scenario_name] += 1
    this_counter = TEST_COUNTERS[prefix][scenario_name]

    name = '{absolute}_{scenario}_{num}_{step}_{prefix}_{status}'.format(
        absolute=str(this_absolute_counter).zfill(5),
        scenario=scenario_name,
        num=str(this_counter).zfill(2),
        step=step_name,
        prefix=prefix,
        status=status,
    )

    context.browser.screenshot(name)
    with open(os.path.join('/tmp', name + '.html'), 'w') as out:
        out.write(context.browser.html.encode('utf-8'))
    if prefix == 'following':
        try:
            js_errors = {
                'result': context.browser.evaluate_script(
                    "JSON.stringify(JS_ERRORS);"
                )
            }
        except Exception as e:
            print e
            js_errors = {'error': str(e)}
        try:
            console_log = {
                'result': context.browser.evaluate_script(
                    "JSON.stringify(CONSOLE_LOG);"
                )
            }
        except Exception as e:
            print e
            console_log = {'error': str(e)}
        metadata = {
            'js_errors': js_errors,
            'console_log': console_log,
        }
        with open(os.path.join('/tmp', name + '.meta.json'), 'w') as out:
            try:
                out.write(
                    json.dumps(metadata).encode('utf8')
                )
            except Exception as e:
                print e


def before_all(context):
    context.engine = getattr(settings, 'WEBDRIVER_BROWSER', 'phantomjs')
    # Ember is running on :8000, and it knows to send API traffic to :8001
    # where this server is running.
    context.config.server_url = 'http://127.0.0.1:8000/'
    context.browser = Browser(context.engine)
    context.browser.driver.set_window_size(1024, 800)
    context.browser.driver.implicitly_wait(10)
    context.browser.driver.set_page_load_timeout(60)
    context.browser.visit(context.config.server_url)
    context.browser.execute_script(
        u"window.localStorage.setItem('disable_ticket_stream', 'yes');"
    )


def after_all(context):
    context.browser.quit()
    context.browser = None


def before_step(context, step):
    if 'TRAVIS' in os.environ:
        try:
            save_page_details(context, step, 'before')
        except Exception as e:
            print e


def after_step(context, step):
    if 'TRAVIS' in os.environ:
        try:
            save_page_details(context, step, 'following')
        except Exception as e:
            print e


def before_scenario(context, step):
    User.objects.filter(
        email=settings.TESTING_LOGIN_USER
    ).delete()


def after_scenario(context, step):
    if hasattr(context, 'teardown_steps'):
        for teardown_function in context.teardown_steps:
            teardown_function(context)
        context.teardown_steps = []
    context.browser.visit(urljoin(context.config.server_url, '/logout/'))
Python
0
@@ -1589,32 +1589,64 @@ 'result': + json.loads(%0A context.browser @@ -1675,32 +1675,36 @@ + %22JSON.stringify( @@ -1712,24 +1712,46 @@ S_ERRORS);%22%0A + )%0A @@ -1930,16 +1930,48 @@ result': + json.loads(%0A context @@ -2008,32 +2008,36 @@ + %22JSON.stringify( @@ -2047,24 +2047,46 @@ SOLE_LOG);%22%0A + )%0A
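Editorial note: decoded, the diff wraps each `evaluate_script("JSON.stringify(...)")` result in `json.loads(...)`, so the metadata dict holds parsed objects instead of JSON text that would be double-encoded by the later `json.dumps`. A hedged sketch of the pattern, assuming a splinter-style `browser` object:

import json

def collect_js_errors(browser):
    # JSON.stringify returns JSON text; json.loads turns it back into
    # Python objects so the surrounding metadata is not double-encoded.
    try:
        return {
            'result': json.loads(
                browser.evaluate_script("JSON.stringify(JS_ERRORS);")
            )
        }
    except Exception as e:
        return {'error': str(e)}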
b0dd7879fbf2000c86a2f77995495d480c890713
Add search by location
usecases/events/search_by_location.py
usecases/events/search_by_location.py
from predicthq import Client

# Please copy paste your access token here
# or read our Quickstart documentation if you don't have a token yet
# https://developer.predicthq.com/guides/quickstart/
ACCESS_TOKEN = 'abc123'

phq = Client(access_token=ACCESS_TOKEN)
Python
0
@@ -255,8 +255,1929 @@ OKEN)%0A%0A%0A +# The events endpoint supports three types of search by location:%0A# - by area%0A# - by fuzzy location search around%0A# - by geoname place ID (see places endpoint for more details)%0A%0A%0A# The within parameter allows you to search for events within%0A# a specified area. It expects a string in the form%0A# %7Bradius%7D%7Bunit%7D@%7Blatitude%7D,%7Blongitude%7D%0A# where the radius unit can be one of: m, km, ft, mi.%0A# https://developer.predicthq.com/resources/events/#param-within%0A# Please note that the the within parameter uses the lat, lon order%0A# but the location field in the event response uses the lon, lat GeoJSON order.%0Afor event in phq.events.search(within='[email protected],174.768368'):%0A print(event.rank, event.category, event.title, event.location)%0A%0A%0A# The fuzzy location search around doesn't restrict search results%0A# to the specified latitude, longitude and offset.%0A# In most cases, you only need to use the %60origin%60 key,%0A# e.g. %7B'origin': '%7Blat%7D,%7Blon%7D'%7D%0A# Please not that this affects the relevance of your search results.%0A# https://developer.predicthq.com/resources/events/#param-loc-around%0Afor event in phq.events.search(location_around=%7B'origin': '-36.844480,174.768368'%7D):%0A print(event.rank, event.category, event.title, event.location, event.relevance)%0A%0A%0A# Finally, you can specify a geoname place ID or a list of place IDs or%0A# airport codes (see https://developer.predicthq.com/csv/airport_codes.csv)%0A# The scope suffix (includes events having children or parent of the place ID)%0A# or the exact (only events with the specified place ID) suffixes can be used.%0A# https://developer.predicthq.com/resources/events/#param-place%0Afor event in phq.events.search(place=%7B'scope': '5128638'%7D): # place ID%0A print(event.rank, event.category, event.title, event.place_hierarchies)%0A%0Afor event in phq.events.search(place=%7B'scope': 'SFO'%7D): # airport code%0A print(event.rank, event.category, event.title, event.place_hierarchies)%0A
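Editorial note: the percent-encoded diff above appends three documented search styles (by area, fuzzy location around, and geoname place ID). The fuzzy `location_around` example is fully recoverable from the encoded text and is reproduced below; the `within` example is not repeated here because its radius@latitude token appears redacted in the source ('[email protected]'):

# Fuzzy location search: results are ranked around the origin rather
# than strictly bounded by it, which affects relevance scoring.
for event in phq.events.search(location_around={'origin': '-36.844480,174.768368'}):
    print(event.rank, event.category, event.title, event.location, event.relevance)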
997cd53d1d045840118876227b9c5588e153195b
fix not equal override. thanks @hodgestar
cms/models.py
cms/models.py
import re
import unicodedata

RE_NUMERICAL_SUFFIX = re.compile(r'^[\w-]*-(\d+)+$')

from gitmodel import fields, models


class FilterMixin(object):

    @classmethod
    def filter(cls, **fields):
        items = list(cls.all())
        for field, value in fields.items():
            if hasattr(cls, field):
                items = [a for a in items if getattr(a, field) == value]
            else:
                raise Exception('invalid field %s' % field)
        return items


class SlugifyMixin(object):

    def slugify(self, value):
        """
        Normalizes string, converts to lowercase, removes non-alpha
        characters, and converts spaces to hyphens.
        """
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
        value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
        return re.sub('[-\s]+', '-', value)

    def generate_slug(self):
        if hasattr(self, 'title') and self.title:
            if hasattr(self, 'slug') and not self.slug:
                self.slug = self.slugify(unicode(self.title))[:40]

    def save(self, *args, **kwargs):
        self.generate_slug()
        return super(SlugifyMixin, self).save(*args, **kwargs)


class Category(FilterMixin, SlugifyMixin, models.GitModel):
    slug = fields.SlugField(required=True, id=True)
    title = fields.CharField(required=True)

    def __eq__(self, other):
        return self.slug == other.slug

    def __ne__(self, other):
        return self.slug == other.slug


class Page(FilterMixin, SlugifyMixin, models.GitModel):
    slug = fields.SlugField(required=True, id=True)
    title = fields.CharField(required=True)
    content = fields.CharField(required=False)
    published = fields.BooleanField(default=True)
    primary_category = fields.RelatedField(Category, required=False)
Python
0.000002
@@ -1481,33 +1481,33 @@ eturn self.slug -= +! = other.slug%0A%0A%0Ac
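Editorial note: decoded, this one-character fix turns `==` into `!=` inside `Category.__ne__`; before it, `__ne__` duplicated `__eq__`, so `a != b` was False even for differing slugs. A standalone illustration of the corrected pair:

class Category(object):
    def __init__(self, slug):
        self.slug = slug

    def __eq__(self, other):
        return self.slug == other.slug

    def __ne__(self, other):
        # The corrected comparison; the buggy version returned == here.
        return self.slug != other.slug

assert Category('a') == Category('a')
assert Category('a') != Category('b')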
dbfb095f6b90c2517416652d53b6db6b5ee919a4
Bump version
fabdeploy/__init__.py
fabdeploy/__init__.py
VERSION = (0, 3, 3, 'final', 0)


def get_version():
    version = '%s.%s' % (VERSION[0], VERSION[1])
    if VERSION[2]:
        version = '%s.%s' % (version, VERSION[2])
    if VERSION[3:] == ('alpha', 0):
        version = '%s pre-alpha' % version
    else:
        if VERSION[3] != 'final':
            version = '%s %s %s' % (version, VERSION[3], VERSION[4])
    return version
Python
0
@@ -10,17 +10,17 @@ (0, 3, -3 +4 , 'final
dc49ce292d4e0669598abb7f45ba389efde0dabc
Fix testTeleopPanel
src/python/tests/testTeleopPanel.py
src/python/tests/testTeleopPanel.py
from director import robotsystem
from director.consoleapp import ConsoleApp
from director import transformUtils
from director import visualization as vis
from director import objectmodel as om
from director import teleoppanel
from director import playbackpanel

from PythonQt import QtCore, QtGui

import numpy as np


def checkGraspFrame(inputGraspFrame, side):
    '''
    Return True if the given grasp frame matches the grasp frame
    of the teleop robot model's current pose, else False.
    '''
    pose = teleopJointController.q
    teleopGraspFrame = ikPlanner.newGraspToWorldFrame(pose, side,
                                                      ikPlanner.newGraspToHandFrame(side))

    p1, q1 = transformUtils.poseFromTransform(inputGraspFrame)
    p2, q2 = transformUtils.poseFromTransform(teleopGraspFrame)

    try:
        np.testing.assert_allclose(p1, p2, rtol=1e-3)
        np.testing.assert_allclose(q1, q2, rtol=1e-3)
        return True
    except AssertionError:
        return False


def onIkStartup(ikServer, startSuccess):

    side = 'left'
    goalFrame = transformUtils.frameFromPositionAndRPY([0.5, 0.5, 1.2], [0, 90, -90])
    assert not checkGraspFrame(goalFrame, side)

    frame = teleopPanel.endEffectorTeleop.newReachTeleop(goalFrame, side)
    assert checkGraspFrame(goalFrame, side)

    teleopPanel.ui.planButton.click()
    assert playbackPanel.plan is not None

    teleopPanel.ikPlanner.useCollision = True;
    teleopPanel.ui.planButton.click()
    assert playbackPanel.plan is not None

    frame.setProperty('Edit', True)

    app.startTestingModeQuitTimer()


app = ConsoleApp()
app.setupGlobals(globals())
view = app.createView()

robotsystem.create(view, globals())

playbackPanel = playbackpanel.PlaybackPanel(planPlayback, playbackRobotModel, playbackJointController,
                                            robotStateModel, robotStateJointController, manipPlanner)

teleopPanel = teleoppanel.TeleopPanel(robotStateModel, robotStateJointController, teleopRobotModel,
                                      teleopJointController, ikPlanner, manipPlanner, affordanceManager,
                                      playbackPanel.setPlan, playbackPanel.hidePlan)

manipPlanner.connectPlanReceived(playbackPanel.setPlan)

ikServer.connectStartupCompleted(onIkStartup)
startIkServer()

w = QtGui.QWidget()
l = QtGui.QGridLayout(w)
l.addWidget(view, 0, 0)
l.addWidget(playbackPanel.widget, 1, 0)
l.addWidget(teleopPanel.widget, 0, 1, 2, 1)
l.setMargin(0)
l.setSpacing(0)
w.show()
w.resize(1600, 900)

app.start(enableAutomaticQuit=False)
Python
0.000001
@@ -253,16 +253,51 @@ ackpanel +%0Afrom director import planningutils %0A%0Afrom P @@ -1888,16 +1888,104 @@ anner)%0A%0A +planningUtils = planningutils.PlanningUtils(robotStateModel, robotStateJointController)%0A teleopPa @@ -2205,16 +2205,31 @@ hidePlan +, planningUtils )%0A%0Amanip
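Editorial note: decoded from the percent-encoding, the fix imports `planningutils`, constructs a `PlanningUtils` helper from the robot-state objects, and passes it as an extra TeleopPanel argument. Shown here only for readability; this fragment assumes the globals created earlier in the test script and is not standalone:

from director import planningutils

planningUtils = planningutils.PlanningUtils(robotStateModel,
                                            robotStateJointController)

teleopPanel = teleoppanel.TeleopPanel(
    robotStateModel, robotStateJointController, teleopRobotModel,
    teleopJointController, ikPlanner, manipPlanner, affordanceManager,
    playbackPanel.setPlan, playbackPanel.hidePlan, planningUtils)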
e5f422315fbc3eb28f99d2be5f50232f4b081f85
make ERT materiality list_editable
valuenetwork/valueaccounting/admin.py
valuenetwork/valueaccounting/admin.py
from django.contrib import admin
from valuenetwork.valueaccounting.models import *
from valuenetwork.valueaccounting.actions import export_as_csv

admin.site.add_action(export_as_csv, 'export_selected objects')

admin.site.register(Unit)
admin.site.register(AgentType)


class CategoryAdmin(admin.ModelAdmin):
    list_display = ('name', 'applies_to', 'description','orderable' )

admin.site.register(Category, CategoryAdmin)


class ResourceRelationshipAdmin(admin.ModelAdmin):
    list_display = ('name', 'inverse_name', 'related_to', 'direction', 'materiality', 'event_type' )
    list_editable = ['event_type',]

admin.site.register(ResourceRelationship, ResourceRelationshipAdmin)


class EconomicAgentAdmin(admin.ModelAdmin):
    list_display = ('nick', 'name', 'agent_type', 'url', 'address', 'email', 'created_date')
    list_filter = ['agent_type',]
    search_fields = ['name', 'address']

admin.site.register(EconomicAgent, EconomicAgentAdmin)


class EconomicResourceTypeAdmin(admin.ModelAdmin):
    list_display = ('name', 'category', 'rate', 'materiality', 'unit')
    list_filter = ['category', 'materiality',]
    search_fields = ['name',]
    list_editable = ['category',]

admin.site.register(EconomicResourceType, EconomicResourceTypeAdmin)


class AgentResourceTypeAdmin(admin.ModelAdmin):
    list_display = ('agent', 'resource_type', 'score','relationship')
    list_filter = ['agent', 'resource_type']

admin.site.register(AgentResourceType, AgentResourceTypeAdmin)


class ProcessTypeResourceTypeAdmin(admin.ModelAdmin):
    list_display = ('process_type', 'resource_type', 'relationship')
    list_filter = ['process_type', 'resource_type']
    search_fields = ['process_type__name','resource_type__name', 'relationship__name',]
    list_editable = ['relationship',]

admin.site.register(ProcessTypeResourceType, ProcessTypeResourceTypeAdmin)


class ProcessTypeResourceTypeInline(admin.TabularInline):
    model = ProcessTypeResourceType


class ProcessTypeAdmin(admin.ModelAdmin):
    list_display = ('name', 'project' )
    list_filter = ['project',]
    search_fields = ['name',]
    inlines = [ ProcessTypeResourceTypeInline, ]

admin.site.register(ProcessType, ProcessTypeAdmin)


class EventTypeAdmin(admin.ModelAdmin):
    list_display = ('name', 'resource_effect', 'unit_type' )

admin.site.register(EventType, EventTypeAdmin)


class EconomicResourceAdmin(admin.ModelAdmin):
    list_display = ('id', 'identifier', 'resource_type', 'quantity', 'unit_of_quantity', 'quality', 'notes', 'owner', 'custodian')
    list_filter = ['resource_type', 'owner', 'custodian']
    search_fields = ['identifier', 'resource_type__name']

admin.site.register(EconomicResource, EconomicResourceAdmin)


class CommitmentInline(admin.TabularInline):
    model = Commitment


class OrderItemInline(admin.TabularInline):
    model = Commitment
    fk_name = 'order'
    fields = ('event_type', 'relationship', 'due_date', 'resource_type', 'quantity', 'unit_of_quantity', 'process')


class OrderAdmin(admin.ModelAdmin):
    list_display = ('provider', 'receiver', 'description','due_date' )
    inlines = [ OrderItemInline, ]

admin.site.register(Order, OrderAdmin)


class ProcessAdmin(admin.ModelAdmin):
    date_hierarchy = 'start_date'
    list_display = ('name', 'start_date', 'end_date', 'process_type', 'project', 'owner', 'managed_by')
    list_filter = ['process_type', 'owner', 'managed_by']
    search_fields = ['name', 'process_type__name', 'owner__name', 'managed_by__name']
    inlines = [ CommitmentInline, ]

admin.site.register(Process, ProcessAdmin)


class ProjectAdmin(admin.ModelAdmin):
    list_display = ('name', 'parent')
    list_filter = ['parent',]
    search_fields = ['name',]

admin.site.register(Project, ProjectAdmin)


class CommitmentAdmin(admin.ModelAdmin):
    date_hierarchy = 'due_date'
    list_display = ('resource_type', 'quantity', 'unit_of_quantity', 'event_type', 'due_date',
        'from_agent', 'process', 'project', 'order', 'independent_demand',
        'description', 'quality')
    list_filter = ['independent_demand', 'event_type', 'resource_type', 'from_agent', 'project']
    search_fields = ['name', 'event_type__name', 'from_agent__name', 'to_agent__name', 'resource_type__name']

admin.site.register(Commitment, CommitmentAdmin)


class EconomicEventAdmin(admin.ModelAdmin):
    date_hierarchy = 'event_date'
    list_display = ('event_type', 'event_date', 'from_agent', 'project', 'resource_type', 'quantity', 'unit_of_quantity', 'description', 'url', 'quality')
    list_filter = ['event_type', 'project', 'resource_type', 'from_agent', ]
    search_fields = ['name', 'event_type__name', 'from_agent__name', 'to_agent__name', 'resource_type__name']

admin.site.register(EconomicEvent, EconomicEventAdmin)


class CompensationAdmin(admin.ModelAdmin):
    list_display = ('initiating_event', 'compensating_event', 'compensation_date', 'compensating_value')
    search_fields = ['initiating_event__from_agent__name', 'initiating_event__to_agent__name']

admin.site.register(Compensation, CompensationAdmin)
Python
0.001909
@@ -1185,16 +1185,31 @@ tegory', + 'materiality', %5D%0A %0Aa
0d056e041f141391b115aef1f1cc5aa684876535
save signature saliency
view_saliency.py
view_saliency.py
#!/usr/bin/env python
import cv2
import numpy
import sys
import salienpy.frequency_tuned
import salienpy.signature


def main(img):
    cv2.imshow('Original Image', img)
    ftuned = salienpy.frequency_tuned.frequency_tuned_saliency(img)
    cv2.imshow('Frequency Tuned', ftuned)
    signa = salienpy.signature.signature_saliency(img)
    cv2.imshow('Signature Saliency', signa)
    cv2.waitKey()


if __name__ == '__main__':
    if len(sys.argv) > 1:
        img = cv2.imread(sys.argv[1])
    else:
        cam = cv2.VideoCapture(0)
        status, img = cam.read()
    main(img)
Python
0
@@ -371,16 +371,56 @@ signa)%0A + cv2.imwrite('signature.png', signa)%0A cv2.
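Editorial note: the diff adds a single `cv2.imwrite('signature.png', signa)` call right after the signature map is shown, so the result is saved to disk as well as displayed. A runnable sketch with a stand-in array (the real script gets `signa` from salienpy):

import cv2
import numpy as np

# Stand-in for the signature saliency map computed by salienpy.
signa = (np.random.rand(48, 64) * 255).astype(np.uint8)

cv2.imwrite('signature.png', signa)  # the line the commit adds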
28968ca117fc18dfe513c06ce4ead2295830fd94
remove redundant parenthesis
plugins/BasePlugin.py
plugins/BasePlugin.py
__author__ = 'marcusmorgenstern'
__mail__ = ''

from abc import ABCMeta, abstractmethod


class BasePlugin():
    """
    Metaclass for guarantee of interface. Each plugin must provide initialisation
    taking optional configuration and invoke method taking data
    """
    __metaclass__ = ABCMeta

    def __init__(self, config=None):
        """
        initialisation
        :param config (dict): configuration params for plugin
        :return: void
        """
        self.dep = []

    @abstractmethod
    def invoke(self, data):
        """
        Entry for plugin execution
        :param data (dict): input data
        :return: void
        """
        pass
Python
0.999999
@@ -103,10 +103,8 @@ ugin -() :%0A
b754ee143ed0a022706bfeed287e392e11dd0e28
Update to work with python3
external/stacktracer.py
external/stacktracer.py
"""Stack tracer for multi-threaded applications. Usage: import stacktracer stacktracer.start_trace("trace.html",interval=5,auto=True) # Set auto flag to always update file! .... stacktracer.stop_trace() """ # Source: http://code.activestate.com/recipes/577334-how-to-debug-deadlocked-multi-threaded-programs/ import sys import traceback from pygments import highlight from pygments.lexers import PythonLexer from pygments.formatters import HtmlFormatter # Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/ def stacktraces(): code = [] for threadId, stack in sys._current_frames().items(): code.append("\n# ThreadID: %s" % threadId) for filename, lineno, name, line in traceback.extract_stack(stack): code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) return highlight("\n".join(code), PythonLexer(), HtmlFormatter( full=False, # style="native", noclasses=True, )) # This part was made by nagylzs import os import time import threading class TraceDumper(threading.Thread): """Dump stack traces into a given file periodically.""" def __init__(self,fpath,interval,auto): """ @param fpath: File path to output HTML (stack trace file) @param auto: Set flag (True) to update trace continuously. Clear flag (False) to update only if file not exists. (Then delete the file to force update.) @param interval: In seconds: how often to update the trace file. """ assert(interval>0.1) self.auto = auto self.interval = interval self.fpath = os.path.abspath(fpath) self.stop_requested = threading.Event() threading.Thread.__init__(self) def run(self): while not self.stop_requested.isSet(): time.sleep(self.interval) if self.auto or not os.path.isfile(self.fpath): self.stacktraces() def stop(self): self.stop_requested.set() self.join() try: if os.path.isfile(self.fpath): os.unlink(self.fpath) except: pass def stacktraces(self): fout = file(self.fpath,"wb+") try: fout.write(stacktraces()) finally: fout.close() _tracer = None def trace_start(fpath,interval=5,auto=True): """Start tracing into the given file.""" global _tracer if _tracer is None: _tracer = TraceDumper(fpath,interval,auto) _tracer.setDaemon(True) _tracer.start() else: raise Exception("Already tracing to %s"%_tracer.fpath) def trace_stop(): """Stop tracing.""" global _tracer if _tracer is None: raise Exception("Not tracing, cannot stop.") else: _trace.stop() _trace = None
Python
0
@@ -2282,20 +2282,20 @@ fout = -file +open (self.fp @@ -2341,16 +2341,22 @@ t.write( +bytes( stacktra @@ -2360,16 +2360,26 @@ traces() +, %22UTF-8%22) )%0A
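Editorial note: decoded, the port replaces the removed Python 2 `file(...)` builtin with `open(...)` and wraps the HTML string in `bytes(..., "UTF-8")`, since a file opened in "wb+" mode rejects str under Python 3. A standalone sketch of the corrected writer:

def write_trace(fpath, html):
    # Python 3: file() no longer exists, and binary mode needs bytes.
    fout = open(fpath, "wb+")
    try:
        fout.write(bytes(html, "UTF-8"))
    finally:
        fout.close()

write_trace("trace.html", "<html><body>ok</body></html>")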
aba9a4dce1c10b975fcd5d33acc9d4636310ef4a
move translation fields to bottom of admin detail page
src/pressroom/models.py
src/pressroom/models.py
# python imports
from datetime import datetime
from django_extensions.db.fields import AutoSlugField
import os

# django imports
from django.conf import settings
from django.contrib.comments.moderation import CommentModerator, moderator
from django.core.urlresolvers import reverse
from django.db import models

# other imports
from photologue.models import Gallery, Photo
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField, AutoSlugField, UUIDField
from taggit.managers import TaggableManager

# Get relative media path
try:
    PRESSROOM_DIR = settings.PRESSROOM_DIR
except:
    PRESSROOM_DIR = 'pressroom'

# define the models
class ArticleManager(models.Manager):
    def get_published(self):
        return self.filter(publish=True).filter(translation_of=None).order_by('-pub_date')

    def get_drafts(self):
        return self.filter(publish=False)

class Article(models.Model):

    language = models.CharField(max_length=10, default=settings.LANGUAGE_CODE, choices=settings.LANGUAGES)
    translation_of = models.ForeignKey('Article', null=True, blank=True)

    pub_date = models.DateTimeField("Publish date", default=datetime.now)
    headline = models.CharField(max_length=200)
    slug = AutoSlugField(populate_from=('headline',), help_text='A "Slug" is a unique URL-friendly title for an object.')
    summary = models.TextField(help_text="A single paragraph summary or preview of the article.", default=u"", null=True, blank=True)
    body = models.TextField("Body text")
    author = models.CharField(max_length=100)
    publish = models.BooleanField("Publish on site", default=True,
                                  help_text='Articles will not appear on the site until their "publish date".')
    sections = models.ManyToManyField('Section', related_name='articles', null=True, blank=True)
    photos = models.ManyToManyField(Photo, related_name='articles', null=True, blank=True)
    documents = models.ManyToManyField('Document', related_name='articles', null=True, blank=True)
    enable_comments = models.BooleanField(default=True)
    tags = TaggableManager(blank=True)

    modified = ModificationDateTimeField()
    modified_by = models.ForeignKey('auth.User', null=True, blank=True, editable=False, related_name="modified_by")
    created = CreationDateTimeField()
    uid = UUIDField()

    # Custom article manager
    objects = ArticleManager()

    class Meta:
        ordering = ['-pub_date']
        get_latest_by = 'pub_date'

    def __unicode__(self):
        return self.headline

    def get_absolute_url(self):
        args = self.pub_date.strftime("%Y/%b/%d").lower().split("/") + [self.language, self.slug]
        return reverse('pr-article-detail', args=args)

class ArticleCommentModerator(CommentModerator):
    email_notification = True
    enable_field = 'enable_comments'

    def moderate(self, comment, content_object, request):
        return True

if Article not in moderator._registry:
    moderator.register(Article, ArticleCommentModerator)

class Document(models.Model):
    file = models.FileField("Document", upload_to=PRESSROOM_DIR+"/documents/%Y/%b/%d")
    pub_date = models.DateTimeField("Date published", default=datetime.now)
    title = models.CharField(max_length=200)
    slug = AutoSlugField(populate_from=('title',), help_text='A "Slug" is a unique URL-friendly title for an object.')
    summary = models.TextField()

    modified = ModificationDateTimeField()
    created = CreationDateTimeField()
    uid = UUIDField()

    class Meta:
        ordering = ['-pub_date']
        get_latest_by = 'pub_date'

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        args = self.pub_date.strftime("%Y/%b/%d").lower().split("/") + [self.slug]
        return reverse('pr-document-detail', args=args)

    def doc_dir(self):
        doc_dir = None
        if self.file is not None:
            doc_dir = os.path.dirname(self.file.path)
        return doc_dir

    def delete(self):
        doc_dir = self.doc_dir()
        super(Document, self).delete()
        if doc_dir is not None:
            if os.listdir(doc_dir) == []:
                os.removedirs(doc_dir)

class Section(models.Model):
    title = models.CharField(max_length=80, unique=True)
    slug = AutoSlugField(populate_from=('title',), help_text='A "Slug" is a unique URL-friendly title for an object.')

    modified = ModificationDateTimeField()
    created = CreationDateTimeField()
    uid = UUIDField()

    class Meta:
        ordering = ['title']

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('pr-section', args=[self.slug])
Python
0
@@ -955,190 +955,8 @@ %0D%0A%0D%0A - language = models.CharField(max_length=10, default=settings.LANGUAGE_CODE, choices=settings.LANGUAGES)%0D%0A translation_of = models.ForeignKey('Article', null=True, blank=True)%0D%0A @@ -1988,24 +1988,208 @@ nk=True)%0D%0A%0D%0A + language = models.CharField(max_length=10, default=settings.LANGUAGE_CODE, choices=settings.LANGUAGES)%0D%0A translation_of = models.ForeignKey('Article', null=True, blank=True)%0D%0A%0D%0A modified
0d5f8ab05b9f971edb1591e3d80d6972732430fe
Remove side-effect of renaming test.
plugins/Owner/test.py
plugins/Owner/test.py
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions, and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions, and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#   * Neither the name of the author of this software nor the name of
#     contributors to this software may be used to endorse or promote products
#     derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###

import sys

if sys.version_info >= (2, 7, 0):
    from unittest import skip
else:
    # Workaround
    def skip(string):
        return lambda x:None

from supybot.test import *
import supybot.conf as conf
import supybot.plugin as plugin


class OwnerTestCase(PluginTestCase):
    plugins = ('Owner', 'Config', 'Misc', 'Admin')

    def testHelpLog(self):
        self.assertHelp('help logmark')

    def testSrcAmbiguity(self):
        self.assertError('capability add foo bar')

    def testIrcquote(self):
        self.assertResponse('ircquote PRIVMSG %s :foo' % self.irc.nick, 'foo')

    def testFlush(self):
        self.assertNotError('flush')

    def testUpkeep(self):
        self.assertNotError('upkeep')

    def testLoad(self):
        self.assertError('load Owner')
        self.assertError('load owner')
        self.assertNotError('load Channel')
        self.assertNotError('list Owner')

    def testReload(self):
        self.assertError('reload Channel')
        self.assertNotError('load Channel')
        self.assertNotError('reload Channel')
        self.assertNotError('reload Channel')

    def testUnload(self):
        self.assertError('unload Foobar')
        self.assertNotError('load Channel')
        self.assertNotError('unload Channel')
        self.assertError('unload Channel')
        self.assertNotError('load Channel')
        self.assertNotError('unload CHANNEL')

    def testDisable(self):
        self.assertError('disable enable')
        self.assertError('disable identify')

    def testEnable(self):
        self.assertError('enable enable')

    def testEnableIsCaseInsensitive(self):
        self.assertNotError('disable Foo')
        self.assertNotError('enable foo')

    def testRename(self):
        self.assertError('rename Admin join JOIN')
        self.assertError('rename Admin join jo-in')
        self.assertNotError('rename Admin join testcommand')
        self.assertRegexp('list Admin', 'testcommand')
        self.assertNotRegexp('list Admin', 'join')
        self.assertError('help join')
        self.assertRegexp('help testcommand', 'Tell the bot to join')
        self.assertRegexp('join', 'not a valid command')
        self.assertHelp('testcommand')

    @skip('Nested commands cannot be renamed yet.')
    def testRenameNested(self):
        self.assertNotError('rename Admin "capability remove" rmcap')
        self.assertNotRegexp('list Admin', 'capability remove')
        self.assertRegexp('list Admin', 'rmcap')
        self.assertNotError('reload Admin')
        self.assertNotRegexp('list Admin', 'capability remove')
        self.assertRegexp('list Admin', 'rmcap')
        self.assertNotError('unrename Admin')
        self.assertRegexp('list Admin', 'capability remove')
        self.assertNotRegexp('list Admin', 'rmcap')

    def testDefaultPluginErrorsWhenCommandNotInPlugin(self):
        self.assertError('defaultplugin foobar owner')


# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
Python
0
@@ -3831,16 +3831,120 @@ ommand') +%0A self.assertNotError('unrename Admin')%0A self.assertNotRegexp('list Admin', 'testcommand') %0A%0A @s
a500a9917787dec5c54c8d15949454ffb4a775ff
Remove unneeded RESPONSE definitions
examples/tcamapp/app.py
examples/tcamapp/app.py
import gi

gi.require_version ("Gtk", "3.0")
gi.require_version ("Gst", "1.0")
gi.require_version ("Tcam", "0.1")

from gi.repository import GdkX11, Gtk, Tcam, GstVideo, Gst, GdkPixbuf

import sys


class DeviceDialog (Gtk.Dialog):
    RESPONSE_OK = 1
    RESPONSE_CANCEL = 2

    def __init__(self, parent=None):
        Gtk.Dialog.__init__(self, parent)
        self.add_buttons (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                          Gtk.STOCK_OK, Gtk.ResponseType.OK)
        model = Gtk.ListStore(GdkPixbuf.Pixbuf, str, str)
        self.__iv = Gtk.IconView.new_with_model(model)
        self.__iv.set_pixbuf_column(0)
        self.__iv.set_text_column(1)
        self.__iv.set_selection_mode (Gtk.SelectionMode.BROWSE)
        for dev in self.__get_devices():
            pixbuf = Gtk.IconTheme.get_default().load_icon (
                Gtk.STOCK_YES, 64, 0)
            label = "%s (%s)" % (dev[1], dev[0])
            model.append ((pixbuf, label, dev[0]))
        self.get_content_area().add(self.__iv)

    def __get_devices(self):
        elem = Gst.ElementFactory.make("tcamsrc")
        if not elem:
            raise (RuntimeError, "Failed to create tcamsrc element")
        ret = []
        for serial in elem.get_device_serials():
            result, name, ident, conn_type = elem.get_device_info(serial)
            ret.append( (serial, name, ident, conn_type))
        return ret

    def get_serial(self):
        return self.__iv.get_model()[self.__iv.get_selected_items()[0]][2]


class AppWindow (Gtk.Window):
    def __init__ (self, serial):
        Gtk.Window.__init__(self)

        self.serial = serial
        self.pipeline = None

        self.set_title ("TCam Demo Applikation")
        self.connect ("destroy", Gtk.main_quit)

        hb = Gtk.HeaderBar()
        hb.set_show_close_button (True)
        hb.props.title = ("TCam Demo Applikation")
        self.set_titlebar (hb)

        combo = self.create_format_combo()
        combo.connect ("changed", self.on_format_combo_changed)
        hb.pack_start (combo)
        hb.show_all()

        vbox = Gtk.Box(Gtk.Orientation.VERTICAL)
        self.add(vbox)

        self.da = Gtk.DrawingArea()
        self.da.set_size_request (640, 480)
        self.da.set_double_buffered (True)
        vbox.pack_start (self.da, True, True, 0)

        vbox.show_all()
        self.da.realize()

    def create_format_combo (self):
        formats = self.get_format_list(self.serial)
        model = Gtk.ListStore (str, int)
        for fmt in formats:
            model.append (fmt)
        combo = Gtk.ComboBox.new_with_model (model)
        renderer_text = Gtk.CellRendererText()
        combo.pack_start (renderer_text, True)
        combo.add_attribute (renderer_text, "text", 0)
        #combo.set_active(0)
        return combo

    def on_format_combo_changed (self, combo):
        if self.pipeline:
            self.pipeline.set_state(Gst.State.NULL)
            self.pipeline.get_state(0)
        it = combo.get_active_iter()
        if it != None:
            model = combo.get_model()
            fmt = model[it][1]
            self.pipeline = self.create_pipeline(fmt)
            self.pipeline.set_state(Gst.State.PLAYING)

    def get_format_list(self, serial):
        elem = Gst.ElementFactory.make("tcamsrc")
        elem.set_property("serial", serial)
        elem.set_state(Gst.State.PAUSED)
        pad = elem.pads[0]
        caps = pad.query_caps()
        l = []
        for i in range (caps.get_size()):
            s = caps.get_structure(i)
            text = "%s %dx%d" % (s.get_string("format"),
                                 s.get_int("width")[1],
                                 s.get_int("height")[1])
            l.append((text, i))
        elem.set_state(Gst.State.NULL)
        return l

    def create_pipeline(self, fmt):
        def bus_sync_handler(bus, msg, pipeline):
            if not GstVideo.is_video_overlay_prepare_window_handle_message(msg):
                return Gst.BusSyncReply.PASS
            msg.src.set_window_handle (self.da.get_window().get_xid())
            return Gst.BusSyncReply.DROP

        p = Gst.Pipeline()
        src = Gst.ElementFactory.make("tcamsrc")
        src.set_property("serial", self.serial)
        p.add(src)
        p.set_state (Gst.State.PAUSED)
        srccaps = src.pads[0].query_caps()
        structure = srccaps.get_structure(fmt).copy()
        structure.remove_field("framerate")
        flt = Gst.ElementFactory.make("capsfilter")
        caps = Gst.Caps.from_string (structure.to_string())
        flt.set_property("caps", caps)
        print ( "Caps String: " + structure.to_string())

        converters = { "GRAY8": ("videoconvert",),
                       "gbrg": ("bayer2rgb","videoconvert"),
                       "GRAY16_LE" : ("videoconvert",)}
        colorformat = structure.get_string("format")
        prev_elem = src
        for conv in converters[colorformat]:
            elem = Gst.ElementFactory.make (conv)
            p.add(elem)
            prev_elem.link(elem)
            prev_elem = elem
        queue1 = Gst.ElementFactory.make ("queue")
        p.add (queue1)
        prev_elem.link(queue1)
        sink = Gst.ElementFactory.make ("glimagesink")
        p.add (sink)
        queue1.link(sink)
        bus = p.get_bus()
        bus.set_sync_handler (bus_sync_handler, p)
        return p


if __name__ == "__main__":
    Gst.init()
    Gtk.init ()

    serial = None
    if len(sys.argv) == 2:
        serial = sys.argv[1]
    else:
        dlg = DeviceDialog()
        dlg.show_all()
        resp = dlg.run()
        if resp != Gtk.ResponseType.OK:
            sys.exit(0)
        serial = dlg.get_serial()
        dlg.destroy()

    #formats = get_format_list(serial)
    #print formats

    win = AppWindow(serial)
    win.present()

    Gtk.main()
Python
0.000003
@@ -230,52 +230,8 @@ g):%0A - RESPONSE_OK = 1%0A RESPONSE_CANCEL = 2%0A
79e2044380d2d5a9568b76777bc7b1950dcaaeb8
Bump version to 14.1.0
recipe_scrapers/__version__.py
recipe_scrapers/__version__.py
__version__ = "14.0.0"
Python
0
@@ -11,13 +11,13 @@ _ = %2214. -0 +1 .0%22%0A
352b628955809e19a7ba6f78631d79afcc85d94a
Fix code coverage in mpi4py.typing
src/mpi4py/typing.py
src/mpi4py/typing.py
# Author:  Lisandro Dalcin
# Contact: [email protected]
"""Typing support."""
# pylint: disable=unnecessary-ellipsis
# pylint: disable=too-few-public-methods

import sys
from typing import (
    Any,
    Union,
    Optional,
    Sequence,
    List,
    Dict,
    Tuple,
)
from numbers import (
    Integral,
)
from .MPI import (
    Datatype,
    BottomType,
    InPlaceType,
)

if sys.version_info >= (3, 8):
    from typing import Protocol
else:
    try:
        from typing_extensions import Protocol
    except ImportError:
        Protocol = object

del sys

__all__ = [
    'SupportsBuffer',
    'SupportsDLPack',
    'SupportsCAI',
    'Buffer',
    'Bottom',
    'InPlace',
    'Aint',
    'Count',
    'Displ',
    'Offset',
    'TypeSpec',
    'BufSpec',
    'BufSpecB',
    'BufSpecV',
    'BufSpecW',
    'TargetSpec',
]

_Stream = Union[int, Any]
_PyCapsule = object
_DeviceType = int
_DeviceID = int


class SupportsBuffer(Protocol):
    """
    Python buffer protocol.

    .. seealso:: :ref:`python:bufferobjects`
    """

    def __buffer__(self, flags: int) -> memoryview:
        """Create a buffer from a Python object."""
        ...


class SupportsDLPack(Protocol):
    """
    DLPack data interchange protocol.

    .. seealso:: :ref:`dlpack:python-spec`
    """

    def __dlpack__(self, *, stream: Optional[_Stream] = None) -> _PyCapsule:
        """Export data for consumption as a DLPack capsule."""
        ...

    def __dlpack_device__(self) -> Tuple[_DeviceType, _DeviceID]:
        """Get device type and device ID in DLPack format."""
        ...


class SupportsCAI(Protocol):
    """
    CUDA Array Interface (CAI) protocol.

    .. seealso:: :ref:`numba:cuda-array-interface`
    """

    @property
    def __cuda_array_interface__(self) -> Dict[str, Any]:
        """CAI protocol data."""
        ...


Buffer = Union[
    SupportsBuffer,
    SupportsDLPack,
    SupportsCAI,
]
"""
Buffer-like object.
"""

Bottom = Union[BottomType, None]
"""
Start of the address range.
"""

InPlace = Union[InPlaceType, None]
"""
In-place buffer argument.
"""

Aint = Integral
"""
Address-sized integral type.
"""

Count = Integral
"""
Integral type for counts.
"""

Displ = Integral
"""
Integral type for displacements.
"""

Offset = Integral
"""
Integral type for offsets.
"""

TypeSpec = Union[Datatype, str]
"""
Datatype specification.
"""

BufSpec = Union[
    Buffer,
    Tuple[Buffer, Count],
    Tuple[Buffer, TypeSpec],
    Tuple[Buffer, Count, TypeSpec],
    Tuple[Bottom, Count, Datatype],
    List,
]
"""
Buffer specification.

* `Buffer`
* Tuple[`Buffer`, `Count`]
* Tuple[`Buffer`, `TypeSpec`]
* Tuple[`Buffer`, `Count`, `TypeSpec`]
* Tuple[`Bottom`, `Count`, `Datatype`]
"""

BufSpecB = Union[
    Buffer,
    Tuple[Buffer, Count],
    Tuple[Buffer, TypeSpec],
    Tuple[Buffer, Count, TypeSpec],
    List,
]
"""
Buffer specification (block).

* `Buffer`
* Tuple[`Buffer`, `Count`]
* Tuple[`Buffer`, `TypeSpec`]
* Tuple[`Buffer`, `Count`, `TypeSpec`]
"""

BufSpecV = Union[
    Buffer,
    Tuple[Buffer, Sequence[Count]],
    Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]]],
    Tuple[Buffer, TypeSpec],
    Tuple[Buffer, Sequence[Count], TypeSpec],
    Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], TypeSpec],
    Tuple[Buffer, Sequence[Count], Sequence[Displ], TypeSpec],
    Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Datatype],
    Tuple[Bottom, Sequence[Count], Sequence[Displ], Datatype],
    List,
]
"""
Buffer specification (vector).

* `Buffer`
* Tuple[`Buffer`, Sequence[`Count`]]
* Tuple[`Buffer`, Tuple[Sequence[`Count`], Sequence[`Displ`]]]
* Tuple[`Buffer`, `TypeSpec`]
* Tuple[`Buffer`, Sequence[`Count`], `TypeSpec`]
* Tuple[`Buffer`, Tuple[Sequence[`Count`], Sequence[`Displ`]], `TypeSpec`]
* Tuple[`Buffer`, Sequence[`Count`], Sequence[`Displ`], `TypeSpec`]
* Tuple[`Bottom`, Tuple[Sequence[`Count`], Sequence[`Displ`]], `Datatype`]
* Tuple[`Bottom`, Sequence[`Count`], Sequence[`Displ`], `Datatype`]
"""

BufSpecW = Union[
    Tuple[Buffer, Sequence[Datatype]],
    Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]],
    Tuple[Buffer, Sequence[Count], Sequence[Displ], Sequence[Datatype]],
    Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]],
    Tuple[Bottom, Sequence[Count], Sequence[Displ], Sequence[Datatype]],
    List,
]
"""
Buffer specification (generalized).

* Tuple[`Buffer`, Sequence[`Datatype`]]
* Tuple[`Buffer`, \
    Tuple[Sequence[`Count`], Sequence[`Displ`]], Sequence[`Datatype`]]
* Tuple[`Buffer`, Sequence[`Count`], Sequence[`Displ`], Sequence[`Datatype`]]
* Tuple[`Bottom`, \
    Tuple[Sequence[`Count`], Sequence[`Displ`]], Sequence[`Datatype`]]
* Tuple[`Bottom`, Sequence[`Count`], Sequence[`Displ`], Sequence[`Datatype`]]
"""

TargetSpec = Union[
    Displ,
    Tuple[()],
    Tuple[Displ],
    Tuple[Displ, Count],
    Tuple[Displ, Count, TypeSpec],
    List,
]
"""
Target specification.

* `Displ`
* Tuple[()]
* Tuple[`Displ`]
* Tuple[`Displ`, `Count`]
* Tuple[`Displ`, `Count`, `Datatype`]
"""
Python
0.000006
@@ -401,17 +401,38 @@ (3, 8): + # pragma: no branch %0A - from @@ -460,16 +460,36 @@ ol%0Aelse: + # pragma: no cover %0A try @@ -1179,32 +1179,52 @@ .%22%22%22%0A ... + # pragma: no cover %0A%0A%0Aclass Support @@ -1492,16 +1492,36 @@ ... + # pragma: no cover %0A%0A de @@ -1653,16 +1653,36 @@ ... + # pragma: no cover %0A%0A%0Aclass @@ -1916,24 +1916,24 @@ ol data.%22%22%22%0A - ...%0A @@ -1931,16 +1931,36 @@ ... + # pragma: no cover %0A%0A%0ABuffe
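Editorial note: the diff annotates the version-dependent import paths and the Protocol method bodies with coverage pragmas, so a test run on one interpreter version is not penalized for branches it cannot reach. The import-guard part, as decoded:

import sys

if sys.version_info >= (3, 8):  # pragma: no branch
    from typing import Protocol
else:  # pragma: no cover
    try:
        from typing_extensions import Protocol
    except ImportError:
        Protocol = object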
725785c59ca6aca23338b0f727dd2c492cb166df
fix a silly bug
process/LDA.py
process/LDA.py
# -*- coding: utf-8 -*-
import jieba
import time
import json
import pickle
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from util import RAW_DATA_DIR
from util import STOP_WORDS
from util import LDA_MODEL_PATH
from util import DOC_PATH
from util import TOPIC_PATH

TOPICS_NUMBER = 4
TOPICS_WORD = 6


def __get_row_data():
    """ build bag of words model """
    raw_data = []
    files_info = []
    t0 = time.time()
    print('reading raw data')
    for parent, _, file_names in os.walk(RAW_DATA_DIR):
        for filename in file_names:
            full_file_name = os.path.join(parent, filename)
            with open(full_file_name, 'r', encoding='utf-8') as file_data:
                raw_data.append(file_data.read())
            file_info = filename.split(':')
            files_info.append({'date':file_info[0], 'filename': file_info[1][:-4]})
    print(f'got {len(raw_data)} files in {time.time()-t0}s')
    return files_info, raw_data


def vectorizer():
    print('extracting tf features')
    t0 = time.time()
    vectorized = CountVectorizer(max_df=0.8,
                                 min_df=0.01,
                                 stop_words=STOP_WORDS,
                                 analyzer='word',
                                 tokenizer=jieba.cut)
    print(f'finish in {time.time()-t0}s')
    return vectorized


def __build_lda_model(tf):
    lda = LatentDirichletAllocation(n_topics=TOPICS_NUMBER,
                                    max_iter=5,
                                    learning_method='online',
                                    learning_offset=50.,
                                    random_state=0)
    t0 = time.time()
    print('building lda model')
    lda.fit(tf)
    print(f'done in {time.time() - t0}')
    return lda


def __topic_list(lda, feature_names):
    topic_list = []
    for topic_idx, topic in enumerate(lda.components_):
        topic_list.append([feature_names[i]
                           for i in topic.argsort()[:-TOPICS_WORD - 1:-1]])
    return topic_list


def __set_lda_info_to_file_info(file_info, lda_model):
    for index, item in enumerate(file_info):
        item['lda'] = lda_model[index].tolist()


if __name__ == '__main__':
    file_info, raw_data = __get_row_data()
    vectorized = vectorizer()
    tf = vectorized.fit_transform(raw_data)
    lda = __build_lda_model(tf)
    topic_list = __topic_list(lda, vectorized.get_feature_names())
    __set_lda_info_to_file_info(file_info, lda.transform(tf))
    print('saving model')
    pickle.dump(pickle.dump, open(LDA_MODEL_PATH, 'wb'))
    json.dump(topic_list, open(TOPIC_PATH, 'w'), ensure_ascii=False)
    json.dump(file_info, open(DOC_PATH, 'w'), ensure_ascii=False)
Python
0.000753
@@ -2595,27 +2595,19 @@ le.dump( -pickle.dump +lda , open(L
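Editorial note: decoded, the "silly bug" is that `pickle.dump(pickle.dump, open(LDA_MODEL_PATH, 'wb'))` serialized the `pickle.dump` function object itself rather than the trained model; the fix passes `lda` as the first argument. A minimal illustration of the corrected call:

import pickle

model = {'topics': 4}  # stand-in for the fitted LDA estimator

with open('/tmp/lda.pkl', 'wb') as f:
    pickle.dump(model, f)  # dump the model, not pickle.dump itself

with open('/tmp/lda.pkl', 'rb') as f:
    assert pickle.load(f) == model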
7009e1f0b316da5f17247786810676f70d282f93
Add assertion.__all__
extenteten/assertion.py
extenteten/assertion.py
import collections

import numpy
import tensorflow as tf

from .util import func_scope


def is_int(num):
    return (isinstance(num, int)
            or isinstance(num, numpy.integer)
            or (isinstance(num, numpy.ndarray)
                and num.ndim == 0
                and issubclass(num.dtype.type, numpy.integer)))


def is_natural_num(num):
    return is_int(num) and num > 0


def is_natural_num_sequence(num_list, length=None):
    return (is_sequence(num_list)
            and all(is_natural_num(num) for num in num_list)
            and (length == None or len(num_list) == length))


def is_sequence(obj):
    return isinstance(obj, collections.Sequence)


@func_scope()
def assert_no_nan(tensor):
    return tf.assert_equal(tf.reduce_any(tf.is_nan(tensor)), False)
Python
0.002638
@@ -81,16 +81,139 @@ scope%0A%0A%0A +__all__ = %5B%0A 'is_int',%0A 'is_natural_num',%0A 'is_natural_num_sequence',%0A 'is_sequence',%0A 'assert_no_nan',%0A%5D%0A%0A%0A def is_i
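Editorial note: the diff inserts an `__all__` list naming the module's five public helpers, so a star-import exports exactly those and nothing else. A small standalone illustration of the mechanism (module and helper names here are illustrative, not from the record):

# somemodule.py
__all__ = ['is_int', 'is_natural_num']

def is_int(num):
    return isinstance(num, int)

def is_natural_num(num):
    return is_int(num) and num > 0

def _internal():
    # Not listed in __all__, so `from somemodule import *` skips it.
    pass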
0a79307cc5a0ece09a72faf2a530c768fe72311a
Fix scope of keymap
confluent_server/confluent/selfservice.py
confluent_server/confluent/selfservice.py
import confluent.config.configmanager as configmanager
import confluent.collective.manager as collective
import confluent.netutil as netutil
import confluent.sshutil as sshutil
import confluent.util as util
import eventlet.green.subprocess as subprocess
import crypt
import json
import time
import yaml

currtz = None
keymap = 'us'
currlocale = 'en_US.UTF-8'
currtzvintage = None


def yamldump(input):
    return yaml.safe_dump(input, default_flow_style=False)


def handle_request(env, start_response):
    global currtz
    global currlocale
    global currtzvintage
    nodename = env.get('HTTP_CONFLUENT_NODENAME', None)
    apikey = env.get('HTTP_CONFLUENT_APIKEY', None)
    if not (nodename and apikey):
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    cfg = configmanager.ConfigManager(None)
    eak = cfg.get_node_attributes(nodename, 'crypted.selfapikey').get(
        nodename, {}).get('crypted.selfapikey', {}).get('hashvalue', None)
    if not eak:
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    salt = '$'.join(eak.split('$', 3)[:-1]) + '$'
    if crypt.crypt(apikey, salt) != eak:
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    retype = env.get('HTTP_ACCEPT', 'application/yaml')
    isgeneric = False
    if retype == '*/*':
        isgeneric = True
        retype = 'application/yaml'
    if retype == 'application/yaml':
        dumper = yamldump
    elif retype == 'application/json':
        dumper = json.dumps
    else:
        start_response('406 Not supported', [])
        yield 'Unsupported content type in ACCEPT: ' + retype
        return
    if env['REQUEST_METHOD'] not in ('HEAD', 'GET') and 'CONTENT_LENGTH' in env and int(env['CONTENT_LENGTH']) > 0:
        reqbody = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
    if env['PATH_INFO'] == '/self/deploycfg':
        myip = env.get('HTTP_X_FORWARDED_HOST', None)
        myip = myip.replace('[', '').replace(']', '')
        ncfg = netutil.get_nic_config(cfg, nodename, serverip=myip)
        if ncfg['prefix']:
            ncfg['ipv4_netmask'] = netutil.cidr_to_mask(ncfg['prefix'])
        deployinfo = cfg.get_node_attributes(
            nodename, ('deployment.*', 'console.method', 'crypted.rootpassword', 'dns.*'))
        deployinfo = deployinfo.get(nodename, {})
        profile = deployinfo.get(
            'deployment.pendingprofile', {}).get('value', '')
        ncfg['profile'] = profile
        protocol = deployinfo.get('deployment.useinsecureprotocols', {}).get(
            'value', 'never')
        ncfg['textconsole'] = bool(deployinfo.get(
            'console.method', {}).get('value', None))
        if protocol == 'always':
            ncfg['protocol'] = 'http'
        else:
            ncfg['protocol'] = 'https'
        ncfg['rootpassword'] = deployinfo.get('crypted.rootpassword', {}).get(
            'hashvalue', None)
        if currtzvintage and currtzvintage > (time.time() - 30.0):
            ncfg['timezone'] = currtz
        else:
            langinfo = subprocess.check_output(
                ['localectl', 'status']).split(b'\n')
            for line in langinfo:
                line = line.strip()
                if line.startswith(b'System Locale:'):
                    ccurrlocale = line.split(b'=')[-1]
                    if not ccurrlocale:
                        continue
                    if not isinstance(ccurrlocale, str):
                        ccurrlocale = ccurrlocale.decode('utf8')
                    currlocale = ccurrlocale
                elif line.startswith(b'VC Keymap:'):
                    ckeymap = line.split(b':')[-1]
                    ckeymap = ckeymap.strip()
                    if not ckeymap:
                        continue
                    if not isinstance(ckeymap, str):
                        ckeymap = ckeymap.decode('utf8')
                    keymap = ckeymap
            tdc = subprocess.check_output(['timedatectl']).split(b'\n')
            for ent in tdc:
                ent = ent.strip()
                if ent.startswith(b'Time zone:'):
                    currtz = ent.split(b': ', 1)[1].split(b'(', 1)[0].strip()
                    if not isinstance(currtz, str):
                        currtz = currtz.decode('utf8')
                    currtzvintage = time.time()
                    ncfg['timezone'] = currtz
                    break
        ncfg['locale'] = currlocale
        ncfg['keymap'] = keymap
        ncfg['nameservers'] = []
        for dns in deployinfo.get(
                'dns.servers', {}).get('value', '').split(','):
            ncfg['nameservers'].append(dns)
        dnsdomain = deployinfo.get('dns.domain', {}).get('value', None)
        ncfg['dnsdomain'] = dnsdomain
        start_response('200 OK', (('Content-Type', retype),))
        yield dumper(ncfg)
    elif env['PATH_INFO'] == '/self/sshcert':
        if not sshutil.ca_exists():
            start_response('500 Unconfigured', ())
            yield 'CA is not configured on this system (run ...)'
            return
        cert = sshutil.sign_host_key(reqbody, nodename)
        start_response('200 OK', (('Content-Type', 'text/plain'),))
        yield cert
    elif env['PATH_INFO'] == '/self/nodelist':
        nodes = set(cfg.list_nodes())
        for mgr in configmanager.list_collective():
            nodes.add(mgr)
        nodes.add(collective.get_myname())
        if isgeneric:
            start_response('200 OK', (('Content-Type', 'text/plain'),))
            for node in util.natural_sort(nodes):
                yield node + '\n'
        else:
            start_response('200 OK', (('Content-Type', retype),))
            yield dumper(sorted(nodes))
    elif env['PATH_INFO'] == '/self/updatestatus':
        update = yaml.safe_load(reqbody)
        if update['status'] != 'complete':
            raise Exception('Unknown update status request')
        currattr = cfg.get_node_attributes(nodename, 'deployment.*').get(
            nodename, {})
        pending = currattr.get('deployment.pendingprofile', {}).get('value', '')
        updates = {}
        if pending:
            updates['deployment.pendingprofile'] = {'value': ''}
            currprof = currattr.get('deployment.profile', {}).get('value', '')
            if currprof != pending:
                updates['deployment.profile'] = {'value': pending}
            cfg.set_node_attributes({nodename: updates})
            start_response('200 OK', (('Content-Type', 'text/plain'),))
            yield 'OK'
        else:
            start_response('500 Error', (('Content-Type', 'text/plain'),))
            yield 'No pending profile detected, unable to accept status update'
    else:
        start_response('404 Not Found', ())
        yield 'Not found'
Python
0.000001
@@ -512,24 +512,42 @@ obal currtz%0A + global keymap%0A global c
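Editorial note: decoded, the fix adds `global keymap` alongside the existing `global currtz` declarations in `handle_request`; without it, the `keymap = ckeymap` assignment inside the function binds a new local name and the module-level default is never updated. A standalone illustration:

keymap = 'us'  # module-level default

def detect_keymap(reported):
    global keymap  # without this, the assignment below would be local only
    if reported:
        keymap = reported
    return keymap

detect_keymap('de')
assert keymap == 'de'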
3876130a94f3a43a6b34dd3be22ef963238bda3b
fix migration
mygpo/usersettings/migrations/0002_move_existing.py
mygpo/usersettings/migrations/0002_move_existing.py
import json

from django.db import migrations
from django.contrib.contenttypes.models import ContentType


def move_podcastsettings(apps, schema_editor):
    PodcastConfig = apps.get_model("subscriptions", "PodcastConfig")
    UserSettings = apps.get_model("usersettings", "UserSettings")

    for cfg in PodcastConfig.objects.all():
        if not json.loads(cfg.settings):
            continue

        setting, created = UserSettings.objects.update_or_create(
            user=cfg.user,
            # we can't get the contenttype from cfg.podcast as it would be a
            # different model
            content_type=ContentType.objects.filter(app_label='podcasts',
                model='podcast'),
            object_id=cfg.podcast.pk,
            defaults={
                'settings': cfg.settings,
            }
        )


def move_usersettings(apps, schema_editor):
    UserProfile = apps.get_model("users", "UserProfile")
    UserSettings = apps.get_model("usersettings", "UserSettings")

    for profile in UserProfile.objects.all():
        if not json.loads(profile.settings):
            continue

        setting, created = UserSettings.objects.update_or_create(
            user=profile.user,
            content_type=None,
            object_id=None,
            defaults={
                'settings': profile.settings,
            }
        )


class Migration(migrations.Migration):

    dependencies = [
        ('usersettings', '0001_initial'),
        ('subscriptions', '0002_unique_constraint'),
        ('users', '0011_syncgroup_blank'),
    ]

    operations = [
        migrations.RunPython(move_podcastsettings),
        migrations.RunPython(move_usersettings),
    ]
Python
0
@@ -43,67 +43,8 @@ ons%0A -from django.contrib.contenttypes.models import ContentType%0A %0A%0Ade @@ -215,32 +215,96 @@ %22UserSettings%22) +%0A ContentType = apps.get_model('contenttypes', 'ContentType') %0A%0A for cfg in @@ -645,14 +645,11 @@ cts. -filter +get (app @@ -667,19 +667,16 @@ casts',%0A -
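Editorial note: decoded, the fix drops the direct `ContentType` import in favor of `apps.get_model('contenttypes', 'ContentType')`, and swaps `.filter(...)` for `.get(...)` so the foreign key receives a single instance rather than a queryset. A hedged sketch of the data-migration pattern (model and app names shortened for illustration):

from django.db import migrations

def forwards(apps, schema_editor):
    # Use the historical model registry rather than importing the model,
    # so the migration sees the schema as it existed at this point.
    ContentType = apps.get_model('contenttypes', 'ContentType')
    # .get() returns one instance, which is what a ForeignKey needs;
    # .filter() would hand the field a queryset and fail on save.
    ct = ContentType.objects.get(app_label='podcasts', model='podcast')

class Migration(migrations.Migration):
    dependencies = [('usersettings', '0001_initial')]
    operations = [migrations.RunPython(forwards)]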
5c50d3fcda08da468b2f6b5e61fa1777cc08b17b
Fix test.
kolibri/content/test/test_downloadcontent.py
kolibri/content/test/test_downloadcontent.py
import os
import tempfile
import hashlib
import mimetypes
from django.test import TestCase, Client
from django.test.utils import override_settings
from kolibri.auth.models import DeviceOwner
from kolibri.content.models import File, ContentNode
from kolibri.content.utils.paths import get_content_storage_file_path
from le_utils.constants import file_formats, format_presets

CONTENT_STORAGE_DIR_TEMP = tempfile.mkdtemp()


@override_settings(
    CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
)
class DownloadContentTestCase(TestCase):
    """
    Test case for the downloadcontent endpoint.
    """

    def setUp(self):
        # create DeviceOwner to pass the setup_wizard middleware check
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        self.client = Client()
        self.hash = hashlib.md5("DUMMYDATA".encode()).hexdigest()
        self.extension = dict(file_formats.choices).get("pdf")
        self.filename = "{}.{}".format(self.hash, self.extension)
        self.title = "abc123!@#$%^&*();'[],./?><"
        self.contentnode = ContentNode(title=self.title)
        self.available = True
        self.preset = format_presets.DOCUMENT
        self.file = File(checksum=self.hash, extension=self.extension, available=self.available,
                         contentnode=self.contentnode, preset=self.preset)

        self.path = get_content_storage_file_path(self.filename)
        path_dir = os.path.dirname(self.path)
        if not os.path.exists(path_dir):
            os.makedirs(path_dir)
        tempfile = open(self.path, "w")
        tempfile.write("test")
        tempfile.close()

    def test_generate_download_filename(self):
        self.assertEqual(self.file.get_download_filename(), "abc123._Document.{}".format(self.extension))

    def test_generate_download_url(self):
        self.assertEqual(self.file.get_download_url(),
                         "/downloadcontent/{}/{}".format(self.filename, self.file.get_download_filename()))

    def test_download_existing_file(self):
        response = self.client.get(self.file.get_download_url())
        self.assertEqual(response.status_code, 200)

    def test_download_non_existing_file(self):
        bad_download_url = self.file.get_download_url().replace(self.file.get_download_url()[25:25], "aaaaa")
        response = self.client.get(bad_download_url)
        self.assertEqual(response.status_code, 404)

    def test_download_headers(self):
        response = self.client.get(self.file.get_download_url())
        self.assertEqual(response['Content-Type'], mimetypes.guess_type(self.filename)[0])
        self.assertEqual(response['Content-Disposition'], 'attachment;')
        self.assertEqual(response['Content-Length'], str(os.path.getsize(self.path)))
Python
0
@@ -892,13 +892,8 @@ n = -dict( file @@ -905,27 +905,11 @@ ats. -choices).get(%22pdf%22) +PDF %0A
b410cbc1d58c5dce85b1bdff85fa881de58bf299
fix BadArgument
cogs/error.py
cogs/error.py
#!/bin/env python

from discord.ext.commands import errors
import sys
import traceback


class ErrorHandler:
    def __init__(self, bot):
        self.bot = bot

    async def on_command_error(self, ctx, error):
        """ Handle command errors more gracefully """

        if isinstance(error, errors.CommandNotFound):
            return

        if isinstance(error, errors.NotOwner):
            return await ctx.send('Sorry, only the owner of qtbot may run this command.')

        if isinstance(error, errors.CommandOnCooldown):
            return await ctx.send(f'This command is on cooldown. Please retry in `{error.retry_after:.0f}` second(s).')

        if isinstance(error, errors.MissingRequiredArgument):
            return await ctx.send(f'Command missing required argument `{error.param}`.')

        if isinstance(error, errors.MissingPermissions):
            return await ctx.send(f'Sorry you need permissions: `{",".join(error.missing_perms)}` to do that.')

        if isinstance(error, errors.BotMissingPermissions):
            return await ctx.send(f'Sorry I need permissions: `{",".join(error.missing_perms)}` to do that.')

        if isinstance(error, errors.BadArgument):
            return await ctx.send(f'{error.message}')

        print(f'Ignoring exception in command {ctx.command}:', file=sys.stderr)
        traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)


def setup(bot):
    bot.add_cog(ErrorHandler(bot))
Python
0.998609
@@ -1238,26 +1238,27 @@ end( -f'%7B error. -message%7D' +__traceback__ )%0A%0A
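Editorial note: decoded, the fix replaces `f'{error.message}'` with `error.__traceback__` in the BadArgument branch; Python 3 exceptions have no `.message` attribute, so the old f-string raised an AttributeError inside the error handler itself. A quick standalone demonstration:

try:
    raise ValueError('bad argument')
except ValueError as error:
    assert not hasattr(error, 'message')  # .message is Python 2 only
    print(str(error))           # 'bad argument'
    print(error.__traceback__)  # a traceback object, as the commit sends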
08cb7b6d6ff7131fd378ff75a2de624d349dfd30
Change version to 2.9
doc/conf.py
doc/conf.py
# -*- coding: utf-8 -*-
#
# Jansson documentation build configuration file, created by
# sphinx-quickstart on Sun Sep  5 21:47:20 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('ext'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['refcounting']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Jansson'
copyright = u'2009-2016, Petri Lehtinen'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.9-dev'
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'c:func'
primary_domain = 'c'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Janssondoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Jansson.tex', u'Jansson Documentation',
   u'Petri Lehtinen', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'jansson', u'Jansson Documentation',
     [u'Petri Lehtinen'], 1)
]
Python
0.000001
@@ -1664,12 +1664,8 @@ '2.9 --dev '%0A#
d45f8c91a11c31c3a9a600d181836950259b8668
remove error prints
repository/netrepos/netauth.py
repository/netrepos/netauth.py
#
# Copyright (c) 2004 Specifix, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#

import md5
import sqlite3
from lib import log

class NetworkAuthorization:
    def check(self, authToken, write = False, label = None, trove = None):
        if label and label.getHost() != self.name:
            log.error("repository name mismatch")
            return False

        if not write and self.anonReads:
            return True

        if not authToken[0]:
            log.error("no authtoken received")
            return False

        stmt = """
        SELECT troveName FROM
            (SELECT userId as uuserId FROM Users WHERE user=? AND password=?)
        JOIN Permissions ON uuserId=Permissions.userId
        LEFT OUTER JOIN TroveNames ON
            Permissions.troveNameId = TroveNames.troveNameId
        """

        m = md5.new()
        m.update(authToken[1])
        params = [authToken[0], m.hexdigest()]

        where = []
        if label:
            where.append(" labelId=(SELECT labelId FROM Labels WHERE " \
                         "label=?) OR labelId is Null")
            params.append(label.asString())

        if write:
            where.append("write=1")

        if where:
            stmt += "WHERE " + " AND ".join(where)

        cu = self.db.cursor()
        cu.execute(stmt, params)

        for (troveName, ) in cu:
            if not troveName or not trove:
                return True

            regExp = self.reCache.get(troveName, None)
            if regExp is None:
                regExp = re.compile(troveName)
                self.reCache[troveName] = regExp

            if regExp.match(trove):
                return True

        log.error("no permissions match for (%s, %s)" % authToken)

        return False

    def checkUserPass(self, authToken, label = None):
        if label and label.getHost() != self.name:
            log.error("repository name mismatch")
            return False

        stmt = "SELECT COUNT(userId) FROM Users WHERE user=? AND password=?"
        m = md5.new()
        m.update(authToken[1])
        cu = self.db.cursor()
        cu.execute(stmt, authToken[0], m.hexdigest())
        row = cu.fetchone()
        return row[0]

    def add(self, user, password, write=True):
        cu = self.db.cursor()
        m = md5.new()
        m.update(password)
        cu.execute("INSERT INTO Users VALUES (Null, ?, ?)", user, m.hexdigest())
        userId = cu.lastrowid
        cu.execute("INSERT INTO Permissions VALUES (?, Null, Null, ?)", userId, write)
        self.db.commit()

    def iterUsers(self):
        cu = self.db.cursor()
        cu.execute("""SELECT Users.user, Users.userId, Permissions.write
                      FROM Users LEFT JOIN Permissions ON
                      Users.userId=Permissions.userId""")
        for row in cu:
            yield row

    def __init__(self, dbpath, name, anonymousReads = False):
        self.name = name
        self.db = sqlite3.connect(dbpath)
        self.anonReads = anonymousReads
        self.reCache = {}

        cu = self.db.cursor()
        cu.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
        tables = [ x[0] for x in cu ]
        if "Users" not in tables:
            cu.execute("""CREATE TABLE Users (userId INTEGER PRIMARY KEY,
                                              user STRING UNIQUE,
                                              password STRING)""")
        if "Labels" not in tables:
            cu.execute("""CREATE TABLE Labels (labelId INTEGER PRIMARY KEY,
                                               label STRING UNIQUE)""")
        if "TroveNames" not in tables:
            cu.execute("""CREATE TABLE TroveNames (troveNameId INTEGER PRIMARY KEY,
                                                   troveName STRING UNIQUE)""")
        if "Permissions" not in tables:
            cu.execute("""CREATE TABLE Permissions (userId INTEGER,
                                                    labelId INTEGER,
                                                    troveNameId INTEGER,
                                                    write INTEGER)""")
            cu.execute("""CREATE INDEX PermissionsIdx ON
                          Permissions(userId, labelId, troveNameId)""")

class InsufficientPermission(Exception):
    pass
Python
0.000001
@@ -597,28 +597,8 @@ ite3 -%0Afrom lib import log %0A%0Acl @@ -753,58 +753,8 @@ me:%0A - log.error(%22repository name mismatch%22)%0A @@ -833,32 +833,32 @@ return True%0A%0A + if not a @@ -874,55 +874,8 @@ 0%5D:%0A - log.error(%22no authtoken received%22)%0A @@ -2073,76 +2073,8 @@ ue%0A%0A - log.error(%22no permissions match for (%25s, %25s)%22 %25 authToken)%0A%0A @@ -2208,58 +2208,8 @@ me:%0A - log.error(%22repository name mismatch%22)%0A
164f8e665dd2a292dbfe44ba98989725c209990d
Update radio.py
cogs/radio.py
cogs/radio.py
from .utils import config, checks, formats
import discord
from discord.ext import commands
import discord.utils
from .utils.api.pycopy import Copy
import random, json, asyncio


class Radio:
    """The radio-bot related commands."""

    def __init__(self, bot):
        self.bot = bot
        self.player = None
        self.stopped = True
        self.q = asyncio.Queue()
        self.play_next_song = asyncio.Event()
        self.current_song = None
        copy_creds = self.load_copy_creds()
        self.copycom = Copy(copy_creds['login'], copy_creds['passwd'])
        self.songs = []
        self.update_song_list()

        if not discord.opus.is_loaded():
            discord.opus.load_opus('/usr/local/lib/libopus.so') #FreeBSD path

    def load_copy_creds():
        with open('../copy_creds.json') as f:
            return json.load(f)

    @property
    def is_playing(self):
        return self.player is not None and self.player.is_playing() and not self.stopped

    def toggle_next_song(self):
        if not self.stopped:
            self.bot.loop.call_soon_threadsafe(self.play_next_song.set)

    def update_song_list(self):
        self.files = self.copycom.list_files(settings.copy_radio_path)

    @commands.command()
    async def join(self, *, channel : discord.Channel = None):
        """Join voice channel.
        """
        if channel is None or channel != discord.ChannelType.voice:
            await self.bot.say('Cannot find a voice channel by that name.')
        await self.bot.join_voice_channel(channel)

    @commands.command()
    async def leave(self):
        """Leave voice channel.
        """
        await self.stop().invoke(ctx)
        await self.bot.voice.disconnect()

    @commands.command()
    async def pause(self):
        """Pause.
        """
        if self.player is not None:
            self.player.pause()

    @commands.command()
    async def resume(self):
        """Resume playing.
        """
        if self.player is not None and not self.is_playing():
            self.player.resume()

    @commands.command()
    async def skip(self):
        """Skip song and play next.
        """
        if self.player is not None and self.is_playing():
            self.player.stop()
            self.toggle_next_song()

    @commands.command()
    async def stop():
        """Stop playing song.
        """
        if self.is_playing():
            self.stopped = True
            self.player.stop()

    @commands.command(pass_context=True)
    async def play(self, ctx):
        """Start playing song from queue.
        """
        if self.player is not None:
            if not self.is_playing():
                await self.resume().invoke(ctx)
                return
            else:
                await self.bot.say('Already playing a song')
                return

        while True:
            if not selfbot.is_voice_connected():
                await self.join(channel=ctx.message.author.voice_channel).invoke(ctx)
                continue

            if self.q.empty():
                await self.q.put(random.choice(self.songs))

            self.play_next_song.clear()
            self.current = await self.q.get()
            self.player = self.bot.voice.create_ffmpeg_player(
                self.copycom.direct_link(settings.copy_radio_path + self.current),
                after=self.toggle_next_song,
                #options="-loglevel debug -report",
                headers = dict(self.copycom.session.headers))
            self.stopped = False
            self.player.start()
            fmt = 'Playing song "{0}"'
            song_name = unquote(self.current.split('/')[-1])
            await bot.say(fmt.format(song_name))
            self.bot.change_status(discord.Game(name=song_name))
            await self.play_next_song.wait()


def setup(bot):
    bot.add_cog(Radio(bot))
Python
0.000001
@@ -784,16 +784,20 @@ y_creds( +self ):%0D%0A
76f699b91aacfce180268be32fe0689ae7b4bd47
Fix up doc-strings and line-lengths
requests_ntlm/requests_ntlm.py
requests_ntlm/requests_ntlm.py
from requests.auth import AuthBase
from requests.adapters import HTTPAdapter
from requests.models import PreparedRequest
from ntlm import ntlm

import weakref


class HttpNtlmAuth(AuthBase):
    """HTTP NTLM Authentication Handler for Requests. Supports pass-the-hash."""

    def __init__(self, username, password, session=None):
        """
        :username - Username in 'domain\\username' format
        :password - Password or hash in "ABCDABCDABCDABCD:ABCDABCDABCDABCD" format.
        :session - Optional requests.Session, through which connections are pooled.
        """
        if ntlm is None:
            raise Exception("NTLM libraries unavailable")

        #parse the username
        try:
            self.domain, self.username = username.split('\\', 1)
        except ValueError:
            raise ValueError("username should be in 'domain\\username' format.")
        self.domain = self.domain.upper()

        self.password = password
        self.adapter = HTTPAdapter()

        # Keep a weak reference to the Session, if one is in use. This is to avoid a circular reference.
        self.session = weakref.ref(session) if session else None

    def retry_using_http_NTLM_auth(self, auth_header_field, auth_header, response, args):
        """Attempts to authenticate using HTTP NTLM challenge/response"""

        if auth_header in response.request.headers:
            return response

        request = copy_request(response.request)

        # Pick an adapter to use. If a Session is in use, get the adapter from it.
        adapter = self.adapter
        if self.session:
            session = self.session()
            if session:
                adapter = session.get_adapter(response.request.url)

        # initial auth header with username. will result in challenge
        auth = 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE("%s\\%s" % (self.domain,self.username))
        request.headers[auth_header] = auth

        # A streaming response breaks authentication.
        # This can be fixed by not streaming this request, which is safe because
        # the returned response3 will still have stream=True set if specified in
        # args. In addition, we expect this request to give us a challenge
        # and not the real content, so the content will be short anyway.
        args_nostream = dict(args, stream=False)
        response2 = adapter.send(request, **args_nostream)

        # needed to make NTLM auth compatible with requests-2.3.0
        response2.content

        # this is important for some web applications that store authentication-related info in cookies (it took a long time to figure out)
        if response2.headers.get('set-cookie'):
            request.headers['Cookie'] = response2.headers.get('set-cookie')

        # get the challenge
        auth_header_value = response2.headers[auth_header_field]
        ntlm_header_value = list(filter(lambda s: s.startswith('NTLM '), auth_header_value.split(',')))[0].strip()
        ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(ntlm_header_value[5:])

        # build response
        request = copy_request(request)
        auth = 'NTLM %s' % ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, self.username, self.domain, self.password, NegotiateFlags)
        request.headers[auth_header] = auth

        response3 = adapter.send(request, **args)

        # Update the history.
        response3.history.append(response)
        response3.history.append(response2)

        return response3

    def response_hook(self, r, **kwargs):
        if r.status_code == 401 and 'ntlm' in r.headers.get('www-authenticate','').lower():
            return self.retry_using_http_NTLM_auth('www-authenticate', 'Authorization', r, kwargs)

        if r.status_code == 407 and 'ntlm' in r.headers.get('proxy-authenticate','').lower():
            return self.retry_using_http_NTLM_auth('proxy-authenticate', 'Proxy-authorization', r, kwargs)

        return r

    def __call__(self, r):
        # we must keep the connection because NTLM authenticates the connection, not single requests
        r.headers["Connection"] = "Keep-Alive"
        r.register_hook('response', self.response_hook)
        return r


def copy_request(request):
    """
    Copies a Requests PreparedRequest.
    """
    new_request = PreparedRequest()

    new_request.method = request.method
    new_request.url = request.url
    new_request.body = request.body
    new_request.hooks = request.hooks
    new_request.headers = request.headers.copy()

    return new_request
Python
0.004543
@@ -183,23 +183,29 @@ hBase):%0A +%0A %22%22%22 +%0A HTTP NTL @@ -242,16 +242,21 @@ equests. +%0A%0A Support @@ -271,16 +271,21 @@ he-hash. +%0A %22%22%22%0A%0A @@ -351,33 +351,93 @@ +r %22%22%22 -%0A : +Create an authentication handler for NTLM over HTTP.%0A%0A :param str username - @@ -432,20 +432,17 @@ username - - +: Usernam @@ -484,21 +484,27 @@ - : +:param str password - @@ -499,20 +499,17 @@ password - - +: Passwor @@ -520,16 +520,28 @@ hash in +%0A %22ABCDAB @@ -589,25 +589,27 @@ - : +:param str session - - +: Opt @@ -645,16 +645,28 @@ gh which +%0A connect @@ -777,16 +777,17 @@ lable%22)%0A +%0A @@ -787,16 +787,17 @@ # + parse th @@ -941,16 +941,33 @@ ueError( +%0A %22usernam @@ -1008,17 +1008,31 @@ format.%22 -) +%0A )%0A %0A @@ -1204,16 +1204,26 @@ in use. +%0A # This is @@ -1515,21 +1515,21 @@ response +. %22%22%22%0A -%0A @@ -1724,16 +1724,26 @@ adapter +%0A # from it
1a83696454d5be09b07d1e1e6a23ea76c77012a9
Fix global imports
src/rnaseq_lib/__init__.py
src/rnaseq_lib/__init__.py
import rnaseq_lib.R
import rnaseq_lib.civic
import rnaseq_lib.data
import rnaseq_lib.de
import rnaseq_lib.dim_red
import rnaseq_lib.docker
import rnaseq_lib.drugs
import rnaseq_lib.graphs
import rnaseq_lib.gtf
import rnaseq_lib.images
import rnaseq_lib.plotting
import rnaseq_lib.tissues
import rnaseq_lib.utils
import rnaseq_lib.web
import rnaseq_lib.web.openfda
import rnaseq_lib.web.synapse
Python
0.005989
@@ -1,24 +1,4 @@ -import rnaseq_lib.R%0A impo @@ -59,17 +59,23 @@ eq_lib.d -e +iff_exp %0Aimport @@ -214,16 +214,17 @@ .images%0A +%0A import r @@ -241,12 +241,89 @@ plot -ting +%0Aimport rnaseq_lib.plot.dr%0Aimport rnaseq_lib.plot.hview%0Aimport rnaseq_lib.tissues %0Aimp @@ -336,32 +336,39 @@ aseq_lib.tissues +.plots%0A %0Aimport rnaseq_l
1a6516765f7d95d8a3d89449dc181a9de27cb868
Shove the input into the main method
files/create_project.py
files/create_project.py
#
# This script checks to see if a project exists for the given
# app_env/team.
#
import os
import sys
from optparse import OptionParser
from urllib import quote

from sentry.utils.runner import configure
configure()

from django.conf import settings

# Add in the sentry object models
from sentry.models import Organization, Project, ProjectKey, Team, User


def build_parser():
    parser = OptionParser()
    parser.add_option("-p", "--project", dest="project",
                      help="Application/Project name.", type="string")
    parser.add_option("-l", "--platform", dest="platform",
                      help="Application Language/Platform.", type="string")
    parser.add_option("-o", "--org", dest="org",
                      help="Organization to own this project", type="string")
    parser.add_option("-t", "--team", dest="team",
                      help="Team to own this project", type="string")
    parser.add_option("-v", "--verbose", dest="verbose",
                      help="Verbose output", action="store_true")
    parser.add_option("-s", "--sentry-path", dest="sentry_path",
                      help="Path to sentry project", action="store_true")

    return parser


def main():
    parser = build_parser()
    options, _args = parser.parse_args()

    os.environ['SENTRY_CONF'] = options.sentry_path

    admin_email = settings.SENTRY_OPTIONS['system.admin-email']

    if not options.project:
        parser.error("Project name required")

    if not options.platform:
        parser.error("Platform is required")

    try:
        o = Organization.objects.get(name=options.org)
    except Organization.DoesNotExist:
        print "Organization not found: %s" % options.org
        sys.exit(1)

    try:
        u = User.objects.get(email=admin_email)
    except User.DoesNotExist:
        print "Admin user not found: %s" % admin_email
        sys.exit(1)

    # try to load the requested team
    try:
        t = Team.objects.get(name=options.team, organization_id=o.id)
    except Team.DoesNotExist:
        # this team does not yet exist. Create it.
        t = Team()
        t.name = options.team
        t.organization_id = o.id
        t.owner_id = u.id
        t.save()
        # reload the object
        t = Team.objects.get(name=options.team, organization_id=o.id)

    try:
        p = Project.objects.get(name=options.project, team_id=t.id)
    except:
        # the project doesn't exist. Create it!
        p = Project()
        # ensure all project names are in lowercase
        p.name = options.project.lower()
        p.team_id = t.id
        p.organization_id = o.id
        p.platform = options.platform

        try:
            p.save()
        except:
            print "Project save failed for %s" % (options.project)
            sys.exit(1)

        # create a static file containing this application's DSN
        k = ProjectKey.objects.get(project_id=p.id).get_dsn()
        prefix = quote(o.name.lower() + "-" + t.name.lower() + "-")
        dsn_path = "%s/dsn/%s%s" % (options.sentry_path, prefix, p.name)
        dsn = open(dsn_path, 'w')
        dsn.write(k)
        dsn.close()

        if options.verbose:
            print "Project %s created in team %s." % (options.project, t.name)


if __name__ == "__main__":
    main()
Python
0.999874
@@ -160,204 +160,8 @@ te%0A%0A -from sentry.utils.runner import configure%0Aconfigure()%0A%0Afrom django.conf import settings%0A%0A# Add in the sentry object models%0Afrom sentry.models import Organization, Project, ProjectKey, Team, User%0A%0A %0Adef @@ -825,34 +825,28 @@ oject%22, -action=%22store_true +type=%22string %22)%0A r @@ -938,24 +938,24 @@ rse_args()%0A%0A - os.envir @@ -995,16 +995,231 @@ y_path%0A%0A + from sentry.utils.runner import configure%0A configure()%0A%0A from django.conf import settings%0A # Add in the sentry object models%0A from sentry.models import Organization, Project, ProjectKey, Team, User%0A%0A admi
918a168b53e9f026393aaa17347fc855f7e4a70a
add background task, remove extra roles code, use .format
files/devops/fabfile.py
files/devops/fabfile.py
# Fabfile from Quickstart
# qkst.io/devops/fabfile

from fabric.api import (
    task, parallel, roles,
    run, local, sudo, put,
    env, settings
)
from fabric.contrib.project import rsync_project
from fabric.context_managers import cd, prefix
from fabric.tasks import execute

env.user = 'root'

env.roledefs = {
    'local': ['localhost:22']
}

env.roledefs['all'] = [host for role in env.roledefs.values() for host in role]


@task
@roles('local')
def setup():
    sudo('apt-get update')
    sudo('apt-get install -y python python-pip python-virtualenv')
    run('pip install fabric')


@task
@parallel
def install_deb(url):
    sudo('wget %s -O /tmp/download.deb' % url)
    sudo('dpkg -i /tmp/download.deb && rm /tmp/download.deb')


@task
def upload(local='./', remote='/tmp'):
    rsync_project(
        local_dir=local,
        remote_dir=remote,
        exclude=['.git'],
        extra_opts='-lp'  # preserve symlinks and permissions
    )


@task
def put_as_user(file, remote, user):
    with settings(user=user):
        put(file, remote)


@task
def context_demo():
    with cd('/tmp'):
        run('touch testfile')
    with prefix('cd /tmp')
        run('rm testfile')
Python
0.000001
@@ -283,283 +283,353 @@ env. -user +hosts = +%5B 'root -'%0A%0Aenv.roledefs = %7B%0A 'local': %5B'localhost:22'%5D%0A%7D%0A%0Aenv.roledefs%5B'all'%5D = %5Bhost for role in env.roledefs.values() for host in role%5D%0A%0A%0A@task%0A@roles('local')%0Adef setup():%0A sudo('apt-get update')%0A sudo('apt-get install -y python python-pip python-virtualenv +@localhost:22'%5D%0A%0A%0A@task%0Adef bootstrap():%0A sudo('apt-get update')%0A sudo('apt-get install -y sysstat wget unzip htop dtach')%0A%0A%0A@task%0Adef start():%0A execute('service', 'cron')%0A%0A%0A@task%0Adef service(name, action='start'):%0A sudo('service %7B0%7D %7B1%7D %7C%7C true'.format(name, action))%0A%0A%0A@task%0Adef background(process, name='bgprocess ') +: %0A @@ -638,46 +638,79 @@ un(' -pip install fabric')%0A%0A%0A@task%0A@parallel +dtach -n %60mktemp -u /tmp/%7B0%7D.XXXXX%60 %7B1%7D'.format(process, name))%0A%0A%0A@task %0Adef @@ -747,10 +747,11 @@ get -%25s +%7B0%7D -O @@ -772,15 +772,21 @@ deb' - %25 +.format( url) +) %0A @@ -842,24 +842,131 @@ oad.deb')%0A%0A%0A +@task%0Adef status():%0A run('service --status-all')%0A run('vmstat')%0A run('df -h')%0A run('iostat')%0A%0A%0A @task%0Adef up @@ -1092,16 +1092,38 @@ =%5B'.git' +, '*.pyc', '.DS_Store' %5D,%0A @@ -1290,137 +1290,4 @@ te)%0A -%0A%0A@task%0Adef context_demo():%0A with cd('/tmp'):%0A run('touch testfile')%0A with prefix('cd /tmp')%0A run('rm testfile')%0A
a4f69decb2b22822660033265a6517510c8a2eb5
clean up some convert some strings to fstrings use fewer imports
cogs/utils.py
cogs/utils.py
# -*- coding: utf-8 -*-
from discord.ext import commands
from datetime import datetime
from cogs.cog import Cog
import discord


class Utils(Cog):
    """The description for Utils goes here."""

    @commands.command(name='reload', hidden=True)
    @commands.is_owner()
    async def cog_reload(self, ctx, *, cog: str):
        """Command which Reloads a Module.
        Remember to use dot path. e.g: cogs.owner"""
        try:
            self.bot.unload_extension(cog)
            self.bot.load_extension(cog)
        except Exception as e:
            await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}')
        else:
            await ctx.send('**`SUCCESS`**')

    @commands.command()
    async def ping(self, ctx):
        await ctx.send(f"Pong! time is {ctx.bot.latency * 1000:.2f)} ms")

    @commands.command()
    async def time(self,ctx):
        time = datetime.now().strftime("%a, %e %b %Y %H:%M:%S (%-I:%M %p)")
        await ctx.send(f'the time in alaska is {time}')

    @commands.command()
    @commands.is_owner()
    async def upload(self, ctx, file):
        with open(file, 'rb') as f:
            try:
                await ctx.send(file = discord.File(f, file))
            except FileNotFoundError:
                await ctx.send(f"no such file: {file}")


def setup(bot):
    bot.add_cog(Utils(bot))
Python
0.000523
@@ -789,17 +789,16 @@ 1000:.2f -) %7D ms%22)%0A%0A
8c0f1741f015c8ffd475a2639a113abcec8e6bba
Update copyright year.
doc/conf.py
doc/conf.py
# -*- coding: utf-8 -*-
#
# Moulder documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 14 11:15:19 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('../python'))

# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']

# Link to standard Python documentation
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None)}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General substitutions.
project = 'Sali lab web framework'
copyright = '2009-2014, Sali Lab'

# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = 'SVN'
# The full version, including alpha/beta/rc tags.
release = 'SVN'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = ['images', 'scripts', 'sandbox']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# Options for HTML output
# -----------------------

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Web framework documentation'

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'saliwebdoc'


# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
  ('index', 'Saliweb.tex', ur'Web service Documentation',
   ur'Sali Lab', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
Python
0
@@ -1468,17 +1468,17 @@ 2009-201 -4 +5 , Sali L
ca26358be15da6fdb90113de98493b54965b82ef
Rename kwargs
flask_email/__init__.py
flask_email/__init__.py
# -*- coding: utf-8 -*-
"""
    flask.ext.email
    ~~~~~~~~~~~~~

    Flask extension for sending email.
"""

__version__ = '1.4.3'

"""
Tools for sending email.
"""
from flask import current_app as app

from .utils import import_module

# Imported for backwards compatibility, and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from .utils import CachedDnsName, DNS_NAME
from .message import (
    EmailMessage, EmailMultiAlternatives,
    SafeMIMEText, SafeMIMEMultipart, DEFAULT_ATTACHMENT_MIME_TYPE,
    make_msgid, BadHeaderError, forbid_multi_line_headers)


def get_connection(backend=None, fail_silently=False, **kwds):
    """Load an email backend and return an instance of it.

    If backend is None (default) EMAIL_BACKEND is used.

    Both fail_silently and other keyword arguments are used in the
    constructor of the backend.
    """
    path = backend or app.config.get('EMAIL_BACKEND', 'flask.ext.email.backends.locmem.Mail')
    try:
        mod_name, klass_name = path.rsplit('.', 1)
        mod = import_module(mod_name)
    except ImportError, e:
        raise Exception(('Error importing email backend module %s: "%s"'
                         % (mod_name, e)))
    try:
        klass = getattr(mod, klass_name)
    except AttributeError:
        raise Exception(('Module "%s" does not define a '
                         '"%s" class' % (mod_name, klass_name)))
    return klass(app, fail_silently=fail_silently, **kwds)


def send_mail(subject, message, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              connection=None):
    """
    Easy wrapper for sending a single message to a recipient list. All members
    of the recipient list will see the other recipients in the 'To' field.

    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = connection or get_connection(username=auth_user,
                                              password=auth_password,
                                              fail_silently=fail_silently)
    return EmailMessage(subject, message, from_email, recipient_list,
                        connection=connection).send()


def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
                   auth_password=None, connection=None):
    """
    Given a datatuple of (subject, message, from_email, recipient_list), sends
    each message to each recipient list. Returns the number of emails sent.

    If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
    If auth_user and auth_password are set, they're used to log in.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = connection or get_connection(username=auth_user,
                                              password=auth_password,
                                              fail_silently=fail_silently)
    messages = [EmailMessage(subject, message, sender, recipient)
                for subject, message, sender, recipient in datatuple]
    return connection.send_messages(messages)


def mail_admins(subject, message, fail_silently=False, connection=None,
                html_message=None):
    """Sends a message to the admins, as defined by the ADMINS setting."""
    if not app.config.get('ADMINS', None):
        return
    mail = EmailMultiAlternatives(u'%s%s' % (app.config.get('EMAIL_SUBJECT_PREFIX', '[Flask] '), subject),
                                  message, app.config.get('SERVER_EMAIL', 'root@localhost'),
                                  [a[1] for a in app.config['ADMINS']], connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)


def mail_managers(subject, message, fail_silently=False, connection=None,
                  html_message=None):
    """Sends a message to the managers, as defined by the MANAGERS setting."""
    if not app.config.get('MANAGERS', None):
        return
    mail = EmailMultiAlternatives(u'%s%s' % (app.config.get('EMAIL_SUBJECT_PREFIX', '[Flask] '), subject),
                                  message, app.config.get('SERVER_EMAIL', 'root@localhost'),
                                  [a[1] for a in app.config['MANAGERS']], connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)
Python
0.001059
@@ -754,17 +754,19 @@ se, **kw -d +arg s):%0A @@ -1597,17 +1597,19 @@ ly, **kw -d +arg s)%0A%0A%0Adef
74ecefd4e67112c2a1e7f1a7e5c1ed3b9f441d52
Remove old call to connect()
flask_simon/__init__.py
flask_simon/__init__.py
__version__ = '0.2.0'

from bson.errors import InvalidId
from bson.objectid import ObjectId
from flask import abort
from pymongo import uri_parser
from simon import Model, connection, geo, query
from werkzeug.routing import BaseConverter, ValidationError

__all__ = ('Simon', 'get_or_404', 'Model', 'connection', 'geo', 'query')


class ObjectIDConverter(BaseConverter):
    """Convert Object IDs for use in view routing URLs."""

    def to_python(self, value):
        try:
            return ObjectId(value)
        except (InvalidId, TypeError):
            raise ValidationError

    def to_url(self, value):
        return str(value)


class Simon(object):
    """Automatically creates a connection for Simon models."""

    def __init__(self, app=None, prefix='MONGO', alias=None):
        if app is not None:
            self.init_app(app, prefix, alias)

    def init_app(self, app, prefix='MONGO', alias=None):
        """Initializes the Flask app for use with Simon.

        This method will automatically be called if the app is passed
        into ``__init__()``.

        :param app: the Flask application.
        :type app: :class:`flask.Flask`
        :param prefix: (optional) the prefix of the config settings
        :type prefix: str
        :param alias: the alias to use for the database connection
        :type alias: str

        .. versionchanged:: 0.2.0
           Added support for multiple databases

        .. versionadded:: 0.1.0
        """
        if 'simon' not in app.extensions:
            app.extensions['simon'] = {}

        app.url_map.converters['objectid'] = ObjectIDConverter

        def prefixed(name):
            """Prepends the prefix to the key name."""
            return '{0}_{1}'.format(prefix, name)

        # The URI key is accessed a few times, so be lazy and only
        # generate the prefixed version once.
        uri_key = prefixed('URI')

        if uri_key in app.config:
            parsed = uri_parser.parse_uri(app.config[uri_key])
            if not parsed.get('database'):
                message = '{0} does not contain a database name.'
                message = message.format(uri_key)
                raise ValueError(message)

            host = app.config[uri_key]
            name = app.config[prefixed('DBNAME')] = parsed['database']
            username = app.config[prefixed('USERNAME')] = parsed['username']
            password = app.config[prefixed('PASSWORD')] = parsed['password']
            replica_set = parsed['options'].get('replicaset', None)
            app.config[prefixed('REPLICA_SET')] = replica_set

            connection.connect(host_or_uri=host, name=name, alias=alias,
                               username=username, password=password,
                               replicaSet=replica_set)
        else:
            host_key = prefixed('HOST')
            port_key = prefixed('PORT')
            name_key = prefixed('DBNAME')
            username_key = prefixed('USERNAME')
            password_key = prefixed('PASSWORD')
            replica_set_key = prefixed('REPLICA_SET')

            app.config.setdefault(host_key, 'localhost')
            app.config.setdefault(port_key, 27017)
            app.config.setdefault(name_key, app.name)
            app.config.setdefault(username_key, None)
            app.config.setdefault(password_key, None)
            app.config.setdefault(replica_set_key, None)

            host = app.config[host_key]
            port = app.config[port_key]
            name = app.config[name_key]
            username = app.config[username_key]
            password = app.config[password_key]
            replica_set = app.config[replica_set_key]

            host = '{0}:{1}'.format(host, port)

            connection.connect(host, name=name, alias=alias,
                               username=username, password=password,
                               replicaSet=replica_set)


def get_or_404(model, *qs, **fields):
    """Finds and returns a single document, or raises a 404 exception.

    This method will find a single document within the specified
    model. If the specified query matches zero or multiple documents,
    a ``404 Not Found`` exception will be raised.

    :param model: the model class.
    :type model: :class:`simon.Model`
    :param \*qs: logical queries.
    :type \*qs: :class:`simon.query.Q`
    :param \*\*fields: keyword arguments specifying the query.
    :type \*\*fields: kwargs

    .. versionadded: 0.1.0
    """
    try:
        return model.get(*qs, **fields)
    except (model.NoDocumentFound, model.MultipleDocumentsFound):
        abort(404)
Python
0
@@ -2598,206 +2598,8 @@ _set -%0A%0A connection.connect(host_or_uri=host, name=name, alias=alias,%0A username=username, password=password,%0A replicaSet=replica_set) %0A
fd5cb81b39fd5d9f812dc3f01614f357034a5878
update docs copyright notice
doc/conf.py
doc/conf.py
# -*- coding: utf-8 -*-
import os.path
import sys

from sphinx import version_info as sphinx_version_info

# Ensure we can import "mongoc" and "taglist" extension modules.
sys.path.append(os.path.dirname(__file__))

extensions = [
    'mongoc',
    'taglist',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
]

# General information about the project.
project = 'MongoDB C Driver'
copyright = '2017, MongoDB, Inc'
author = 'MongoDB, Inc'

version_path = os.path.join(os.path.dirname(__file__), '..', 'VERSION_CURRENT')
version = open(version_path).read().strip()
release_path = os.path.join(os.path.dirname(__file__), '..', 'VERSION_RELEASED')
release = open(release_path).read().strip()
release_major, release_minor, release_patch = release.split('.')
release_download = 'https://github.com/mongodb/mongo-c-driver/releases/download/{0}/mongo-c-driver-{0}.tar.gz'.format(release)
rst_prolog = """
.. |release_major| replace:: %(release_major)s

.. |release_minor| replace:: %(release_minor)s

.. |release_patch| replace:: %(release_patch)s

.. |release_download| replace:: https://github.com/mongodb/mongo-c-driver/releases/download/%(release)s/mongo-c-driver-%(release)s.tar.gz
""" % locals()

# The extension requires the "base" to contain '%s' exactly once, but we never intend to use it though
extlinks = {'release': (release_download+'%s', '')}

language = 'en'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'

# don't fetch libbson's inventory from mongoc.org during build - Debian and
# Fedora package builds must work offline - maintain a recent copy here
intersphinx_mapping = {
    'bson': ('http://mongoc.org/libbson/current', 'libbson-objects.inv'),
}

# -- Options for HTML output ----------------------------------------------

html_theme_path = ['.']
html_theme = 'mongoc-theme'
html_title = html_shorttitle = 'MongoDB C Driver %s' % version
# html_favicon = None

if sphinx_version_info >= (1, 6):
    smart_quotes = False
else:
    html_use_smartypants = False

html_sidebars = {
    '**': ['globaltoc.html'],
    'errors': [],  # Make more room for the big table.
    'mongoc_uri_t': [],  # Make more room for the big table.
}
html_show_sourcelink = False

# Note: http://www.sphinx-doc.org/en/1.5.1/config.html#confval-html_copy_source
# This will degrade the Javascript quicksearch if we ever use it.
html_copy_source = False


# -- Options for manual page output ---------------------------------------

# HACK: Just trick Sphinx's ManualPageBuilder into thinking there are pages
# configured - we'll do it dynamically in process_nodes.
man_pages = [True]

# If true, show URL addresses after external links.
#
# man_show_urls = False

from docutils.nodes import title


# To publish HTML docs at GitHub Pages, create .nojekyll file. In Sphinx 1.4 we
# could use the githubpages extension, but old Ubuntu still has Sphinx 1.3.
def create_nojekyll(app, env):
    if app.builder.format == 'html':
        path = os.path.join(app.builder.outdir, '.nojekyll')
        with open(path, 'wt') as f:
            f.write('foo')


def add_ga_javascript(app, pagename, templatename, context, doctree):
    if not app.env.config.analytics:
        return

    context['metatags'] = context.get('metatags', '') + """<script>
(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push(
{'gtm.start': new Date().getTime(),event:'gtm.js'}
);var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'//www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-JQHP');
</script>"""


def add_canonical_link(app, pagename, templatename, context, doctree):
    link = ('<link rel="canonical"'
            ' href="http://mongoc.org/libbson/current/%s.html"/>' % pagename)

    context['metatags'] = context.get('metatags', '') + link


def setup(app):
    app.connect('doctree-read', process_nodes)
    app.connect('env-updated', create_nojekyll)
    app.connect('html-page-context', add_ga_javascript)
    # Run sphinx-build -D analytics=1 to enable Google Analytics.
    app.add_config_value('analytics', False, 'html')
    app.connect('html-page-context', add_canonical_link)


def process_nodes(app, doctree):
    if man_pages == [True]:
        man_pages.pop()

    env = app.env
    metadata = env.metadata[env.docname]

    # A page like installing.rst sets its name with ":man_page: mongoc_installing"
    page_name = metadata.get('man_page')
    if not page_name:
        print('Not creating man page for %s' % env.docname)
        return

    page_title = find_node(doctree, title)

    man_pages.append((env.docname, page_name, page_title.astext(), [author], 3))


def find_node(doctree, klass):
    matches = doctree.traverse(lambda node: isinstance(node, klass))
    if not matches:
        raise IndexError("No %s in %s" % (klass, doctree))

    return matches[0]
Python
0
@@ -400,16 +400,24 @@ = '2017 +-present , MongoD
923889f40e6706aa6d9f72d32eed94fb74e0e152
Remove non-existent directories from html_static_path
doc/conf.py
doc/conf.py
# -*- coding: utf-8 -*-
#
# pylearn2 documentation build configuration file
# It is based on Theano documentation build
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.

import sys, os

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))

# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'numpydoc']#, 'ext']

#Needed otherwise, there is many autosummary error done by numpydo:
#https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False

todo_include_todos = True

try:
    from sphinx.ext import pngmath
    extensions.append('sphinx.ext.pngmath')
except ImportError:
    pass

# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']

# The suffix of source filenames.
source_suffix = '.txt'

# The master toctree document.
master_doc = 'index'

# General substitutions.
project = 'Pylearn2'
copyright = '2011, LISA lab'

# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = 'dev'
# The full version, including alpha/beta/rc tags.
release = 'dev'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = ['images', 'scripts', 'sandbox']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# Options for HTML output
# -----------------------

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'default.css'
html_theme = 'sphinxdoc'

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = 'images/theano_logo-200x67.png'
#html_logo = 'images/theano_logo_allblue_200x46.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static', 'images']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'theanodoc'


# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '11pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
  ('index', 'pylearn2.tex', 'Pylearn2 Documentation',
   'LISA lab, University of Montreal', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'images/snake_theta2-trans.png'
latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
Python
0.000056
@@ -3996,16 +3996,20 @@ path = %5B +%5D # '.static
84a652025bbb0abffd4b0c7b464f267723084cff
Update TL.py
collect/TL.py
collect/TL.py
# printで表示する状態情報は半角6文字以内にすること
import os
import time
import datetime
import hashlib
import urllib
import sqlite3
import tweepy as tp
import detector
import oauth


class StreamListener(tp.StreamListener):
    def __init__(self, api):
        """コンストラクタ"""
        self.api = api

        # 保存先
        self.old_date = datetime.date.today()
        self.reset()

    def on_status(self, status):
        """UserStreamから飛んできたStatusを処理する"""
        # Tweetに画像がついているか
        is_media = False

        # 日付の確認
        now = datetime.date.today()
        if now != self.old_date:
            self.old_date = now
            self.dbfile.commit()
            self.dbfile.close()
            self.reset()

        # TweetがRTかどうか
        if hasattr(status, "retweeted_status"):
            status = status.retweeted_status

        # Tweetが引用ツイートかどうか
        if hasattr(status, "quoted_status"):
            status = status.quoted_status

        # 複数枚の画像ツイートのとき
        if hasattr(status, "extended_entities"):
            if 'media' in status.extended_entities:
                status_media = status.extended_entities
                is_media = True
        # 一枚の画像ツイートのとき
        elif hasattr(status, "entities"):
            if 'media' in status.entities:
                status_media = status.entities
                is_media = True

        # 画像がついていたとき
        if is_media:
            # 自分のツイートは飛ばす(RT対策)
            if status.user.screen_name != "marron_general":
                for image in status_media['media']:
                    if image['type'] != 'photo':
                        break

                    # URL, ファイル名
                    media_url = image['media_url']
                    filename = str(self.fileno).zfill(5)

                    # ダウンロード
                    try:
                        temp_file = urllib.request.urlopen(media_url).read()
                    except:
                        print("Download Error")
                        continue

                    # md5の取得
                    current_md5 = hashlib.md5(temp_file).hexdigest()

                    # すでに取得済みの画像は飛ばす
                    if current_md5 in self.file_md5:
                        print("geted : " + status.user.screen_name +"-" + filename)
                        continue

                    # 画像判定呼出
                    current_hash = None
                    current_hash, facex, facey, facew, faceh = detector.face_2d(temp_file, status.user.screen_name, filename)

                    if current_hash is not None:
                        # すでに取得済みの画像は飛ばす
                        overlaped = False
                        for hash_key in self.file_hash:
                            check = int(hash_key,16) ^ int(current_hash,16)
                            count = bin(check).count('1')
                            if count < 7:
                                print("geted : " + status.user.screen_name +"-" + filename)
                                overlaped = True
                                break

                        # 画像情報保存
                        if overlaped != True:
                            # 取得済みとしてハッシュ値を保存
                            self.file_hash.append(current_hash)
                            self.file_md5.append(current_md5)

                            # ハッシュタグがあれば保存する
                            tags = []
                            if hasattr(status, "entities"):
                                if "hashtags" in status.entities:
                                    for hashtag in status.entities['hashtags']:
                                        tags.append(hashtag['text'])

                            # データベースに保存
                            url = "https://twitter.com/" + status.user.screen_name + "/status/" + status.id_str
                            self.dbfile.execute("insert into list(filename) values('" + filename + "')")
                            self.dbfile.execute("update list set image = '" + media_url + "' where filename = '" + filename + "'")
                            self.dbfile.execute("update list set username = '" + status.user.screen_name + "' where filename = '" + filename + "'")
                            self.dbfile.execute("update list set url = '" + url + "' where filename = '" + filename + "'")
                            self.dbfile.execute("update list set fav = " + str(status.favorite_count) + " where filename = '" + filename + "'")
                            self.dbfile.execute("update list set retweet = " + str(status.retweet_count) + " where filename = '" + filename + "'")
                            self.dbfile.execute("update list set tags = '" + str(tags).replace("'","") + "' where filename = '" + filename + "'")
                            self.dbfile.execute("update list set time = '" + str(datetime.datetime.now()) + "' where filename = '" + filename + "'")
                            self.dbfile.execute("update list set facex = '" + str(facex) + "' where filename = '" + filename + "'")
                            self.dbfile.execute("update list set facey = '" + str(facey) + "' where filename = '" + filename + "'")
                            self.dbfile.execute("update list set facew = '" + str(facew) + "' where filename = '" + filename + "'")
                            self.dbfile.execute("update list set faceh = '" +
                                str(faceh) + "' where filename = '" + filename + "'")
                            self.dbfile.commit()

                            # print("saved : " + status.user.screen_name + "-" + filename)
                            # if tags != []:
                            #     print(" tags : " + str(tags))
                            sef.fileno += 1

                    temp_file = None

    def reset(self):
        """保存用のフォルダーを生成し、必要な変数を初期化する"""
        dbpath = os.path.abspath(__file__).replace(os.path.basename(__file__),self.old_date.isoformat() + ".db")
        if os.path.exists(dbpath):
            print("DB file exist")
            self.dbfile = sqlite3.connect(dbpath)
            cur = self.dbfile.cursor()
            cur.execute("select count(filename) from list")
            self.fileno = cur.fetchone()[0]
            cur.close()
        else:
            self.dbfile = sqlite3.connect(dbpath)
            self.dbfile.execute("create table list (filename, image, username, url, fav, retweet, tags, time, facex, facey, facew, faceh)")
            self.fileno = 0

        self.file_hash = []
        self.file_md5 = []


def main():
    """メイン関数"""
    auth = oauth.get_oauth()
    stream = tp.Stream(auth, StreamListener(tp.API(auth)), secure=True)
    print('Start Streaming!')
    while True:
        # try:
        stream.userstream()
        # except KeyboardInterrupt:
        #     exit()
        # except UnicodeEncodeError as err:
        #     print("UnicodeEncodeError: {0}".format(err))
        # except:
        #     print('UserStream Error')
        #     time.sleep(60)


if __name__ == '__main__':
    main()
Python
0.000001
@@ -5671,16 +5671,17 @@ se +l f.fileno
ba81222c33b4b80c5148c21bb30c60412c85847b
Fix search query
files/kernel-cleanup.py
files/kernel-cleanup.py
#!/usr/bin/env python2.7
"""
kernel-cleanup.py

Find all installed kernel-related packages and mark them as automatically
installed. Then, purge those of these packages that APT now considers
auto-removable.

Ubuntu APT has logic that prevents us from removing all kernels this way.
As an additional safeguard, we always avoid purging the currently running
kernel from this script.
"""
import apt
import os

os.environ["PATH"] = "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin"


class SourcePackageFilter(apt.cache.Filter):
    def __init__(self, source_packages):
        self.spkgs = source_packages

    def apply(self, pkg):
        if pkg.is_installed:
            if pkg.installed.source_name in self.spkgs:
                return True
        return False


class SignedKernelFilter(apt.cache.Filter):
    def apply(self, pkg):
        return bool(pkg.is_installed and pkg.section == "kernel" and pkg.name.startswith("linux-signed"))


class KernelCleaner(object):
    def __init__(self):
        self.c = apt.cache.Cache()

    def get_kernels(self):
        return self.c.get_providing_packages("linux-image")

    def get_tracks(self):
        return set([(pkg.installed or pkg.candidate).source_name for pkg in self.get_kernels()])

    def get_kernel_packages(self):
        packages = apt.cache.FilteredCache(self.c)
        packages.set_filter(SourcePackageFilter(self.get_tracks()))
        return packages

    def get_signed_kernel_packages(self):
        packages = apt.cache.FilteredCache(self.c)
        packages.set_filter(SignedKernelFilter())
        return packages

    def mark_kernels_auto(self):
        for pkg in self.get_kernel_packages():
            pkg.mark_auto()
        self.c.commit()

    def purge_signed_kernels(self):
        for pkg in self.get_signed_kernel_packages():
            pkg.mark_delete(auto_fix=False, purge=True)
        self.c.commit()

    def purge_old_kernels(self):
        release = os.uname()[2]
        for pkg in self.get_kernel_packages():
            if release not in pkg.name:
                if pkg.is_auto_removable:
                    pkg.mark_delete(auto_fix=False, purge=True)
        self.c.commit()


def main():
    kc = KernelCleaner()
    kc.purge_signed_kernels()
    kc.mark_kernels_auto()
    kc.purge_old_kernels()


if __name__ == "__main__":
    main()
Python
0.999382
@@ -901,19 +901,20 @@ ion -== +in ( %22kernel%22 and @@ -909,16 +909,26 @@ %22kernel%22 +, %22utils%22) and pkg
1c17b4b10374129d9e26f7023a93ea587dfe7fc7
update version number to 1.0.10-pre as prep for staging/release
findingaids/__init__.py
findingaids/__init__.py
__version_info__ = (1, 0, 9, None)

# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
    __version__ += ('-%s' % (__version_info__[-1],))


#THIS IS DUPLICATE CODE FROM DWRANGLER AND SHOULD EVENTUALLY BE MOVED INTO EULCORE
# Extends the normal render_to_response to include RequestContext objects.
# Taken from http://www.djangosnippets.org/snippets/3/
# Other similar implementations and adaptations http://lincolnloop.com/blog/2008/may/10/getting-requestcontext-your-templates/
# I also added the SCRIPT_NAME to dictionary so it would be available to templates
# Since I always uset his for this application it makes sense for this app but
# I'm unsure this is the best way overall.
def render_with_context(req, *args, **kwargs):
    kwargs['context_instance'] = RequestContext(req, {'script_name': req.META['SCRIPT_NAME']})

    # Line below was an attempt to add script name to the context so I could
    # deal with template paths for the SITE_URL in a way that handled
    # apps being installed in a site subURL.
    # args[1]['script_name'] = req.META['SCRIPT_NAME']

    return render_to_response(*args, **kwargs)
Python
0
@@ -23,15 +23,17 @@ 0, -9, None +10, 'pre' )%0A%0A# @@ -798,16 +798,95 @@ verall.%0A +# TODO: update to use new render shortcut provided in newer versions of django%0A def rend
d0ee1301ff85c9975946203bd2b52ba03ddae2d0
Fix deferred initialization of app object.
flask_dynamo/manager.py
flask_dynamo/manager.py
"""Main Flask integration.""" from os import environ from boto.dynamodb2 import connect_to_region from boto.dynamodb2.table import Table from flask import ( _app_ctx_stack as stack, ) from .errors import ConfigurationError class Dynamo(object): """DynamoDB wrapper for Flask.""" DEFAULT_REGION = 'us-east-1' def __init__(self, app=None): """ Initialize this extension. :param obj app: The Flask application (optional). """ self.app = app if app is not None: self.init_app(app) def init_app(self, app): """ Initialize this extension. :param obj app: The Flask application. """ self.init_settings() self.check_settings() def init_settings(self): """Initialize all of the extension settings.""" self.app.config.setdefault('DYNAMO_TABLES', []) self.app.config.setdefault('DYNAMO_ENABLE_LOCAL', environ.get('DYNAMO_ENABLE_LOCAL', False)) self.app.config.setdefault('DYNAMO_LOCAL_HOST', environ.get('DYNAMO_LOCAL_HOST')) self.app.config.setdefault('DYNAMO_LOCAL_PORT', environ.get('DYNAMO_LOCAL_PORT')) self.app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID')) self.app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY')) self.app.config.setdefault('AWS_REGION', environ.get('AWS_REGION', self.DEFAULT_REGION)) def check_settings(self): """ Check all user-specified settings to ensure they're correct. We'll raise an error if something isn't configured properly. :raises: ConfigurationError """ if self.app.config['AWS_ACCESS_KEY_ID'] and not self.app.config['AWS_SECRET_ACCESS_KEY']: raise ConfigurationError('You must specify AWS_SECRET_ACCESS_KEY if you are specifying AWS_ACCESS_KEY_ID.') if self.app.config['AWS_SECRET_ACCESS_KEY'] and not self.app.config['AWS_ACCESS_KEY_ID']: raise ConfigurationError('You must specify AWS_ACCESS_KEY_ID if you are specifying AWS_SECRET_ACCESS_KEY.') if self.app.config['DYNAMO_ENABLE_LOCAL'] and not (self.app.config['DYNAMO_LOCAL_HOST'] and self.app.config['DYNAMO_LOCAL_PORT']): raise ConfigurationError('If you have enabled Dynamo local, you must specify the host and port.') @property def connection(self): """ Our DynamoDB connection. This will be lazily created if this is the first time this is being accessed. This connection is reused for performance. """ ctx = stack.top if ctx is not None: if not hasattr(ctx, 'dynamo_connection'): kwargs = { 'host': self.app.config['DYNAMO_LOCAL_HOST'] if self.app.config['DYNAMO_ENABLE_LOCAL'] else None, 'port': int(self.app.config['DYNAMO_LOCAL_PORT']) if self.app.config['DYNAMO_ENABLE_LOCAL'] else None, 'is_secure': False if self.app.config['DYNAMO_ENABLE_LOCAL'] else True, } # Only apply if manually specified: otherwise, we'll let boto # figure it out (boto will sniff for ec2 instance profile # credentials). if self.app.config['AWS_ACCESS_KEY_ID']: kwargs['aws_access_key_id'] = self.app.config['AWS_ACCESS_KEY_ID'] if self.app.config['AWS_SECRET_ACCESS_KEY']: kwargs['aws_secret_access_key'] = self.app.config['AWS_SECRET_ACCESS_KEY'] # If DynamoDB local is disabled, we'll remove these settings. if not kwargs['host']: del kwargs['host'] if not kwargs['port']: del kwargs['port'] ctx.dynamo_connection = connect_to_region(self.app.config['AWS_REGION'], **kwargs) return ctx.dynamo_connection @property def tables(self): """ Our DynamoDB tables. These will be lazily initializes if this is the first time the tables are being accessed. 
""" ctx = stack.top if ctx is not None: if not hasattr(ctx, 'dynamo_tables'): ctx.dynamo_tables = {} for table in self.app.config['DYNAMO_TABLES']: table.connection = self.connection ctx.dynamo_tables[table.table_name] = table if not hasattr(ctx, 'dynamo_table_%s' % table.table_name): setattr(ctx, 'dynamo_table_%s' % table.table_name, table) return ctx.dynamo_tables def __getattr__(self, name): """ Override the get attribute built-in method. This will allow us to provide a simple table API. Let's say a user defines two tables: `users` and `groups`. In this case, our customization here will allow the user to access these tables by calling `dynamo.users` and `dynamo.groups`, respectively. :param str name: The DynamoDB table name. :rtype: object :returns: A Table object if the table was found. :raises: AttributeError on error. """ if name in self.tables: return self.tables[name] raise AttributeError('No table named %s found.' % name) def create_all(self): """ Create all user-specified DynamoDB tables. We'll error out if the tables can't be created for some reason. """ for table_name, table in self.tables.iteritems(): Table.create( table_name = table.table_name, schema = table.schema, throughput = table.throughput, indexes = table.indexes, global_indexes = table.global_indexes, connection = self.connection, ) def destroy_all(self): """ Destroy all user-specified DynamoDB tables. We'll error out if the tables can't be destroyed for some reason. """ for table_name, table in self.tables.iteritems(): table.delete()
Python
0
@@ -684,32 +684,55 @@ on.%0A %22%22%22%0A + self.app = app%0A self.ini
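The fix above restores Flask's standard two-phase extension setup: init_app must record the app it was given, or later attribute access hits a stale self.app. A minimal sketch of that pattern, with an illustrative class and setting name that are not part of flask_dynamo:

import flask

class SketchExtension(object):
    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        # The line the diff adds: without it, deferred initialization
        # leaves self.app as None and later config lookups fail.
        self.app = app
        app.config.setdefault('SKETCH_SETTING', 'default')

ext = SketchExtension()        # deferred: no app yet
app = flask.Flask(__name__)
ext.init_app(app)              # app object bound here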
d516e43e04d51852de6fe4aaeaa00ea75211bda7
Add API methods to pass along endorse/exclude counts
flickipedia/web/rest.py
flickipedia/web/rest.py
""" Defines restful interface to backend """ from flickipedia.mysqlio import DataIOMySQL from flickipedia.config import schema from flickipedia.config import log from flickipedia.model.articles import ArticleModel from flickipedia.model.photos import PhotoModel from flickipedia.model.likes import LikeModel from flickipedia.model.exclude import ExcludeModel def api_insert_article(wiki_page_id, article_name): """ Adds an article """ raise NotImplementedError() def api_insert_photo(flickr_id, article_id): """ Adds a photo """ raise NotImplementedError() def api_set_like(uid, pid, aid): """ Toggles the like-glyph value for the given triplet :param uid: Flickipedia user id :param pid: Flickipedia photo id :param aid: Flickipedia article id :return: True on success, False otherwise """ io = DataIOMySQL() io.connect() result = api_get_like(uid, pid, aid) # toggle and set new value (delete row if it doesn't exist) if result: # io.update false try: io.delete(result) except Exception as e: log.error(' "%s"' % e.message) return False else: # io.update true try: io.insert('Like', user_id=uid, photo_id=pid, article_id=aid) except Exception as e: log.error(' "%s"' % e.message) return False return True def api_get_like(uid, pid, aid): """ Determines the like-glyph value for the given triplet :param uid: Flickipedia user id :param pid: Flickipedia photo id :param aid: Flickipedia article id :return: 'Like' row if exists, None otherwise """ io = DataIOMySQL() io.connect() schema_obj = getattr(schema, 'Likes') # Query to extract res = io.session.query(schema_obj, schema_obj.is_set).filter( schema_obj.photo_id == pid, schema_obj.article_id == aid, schema_obj.user_id == uid ).limit(1).all() if len(res) == 0: log.error('REST \'api_get_glyph\': Couldn\'t find (' 'user="%s", photo_id=%s, article_id=%s)' % ( uid, pid, aid)) return None else: return res[0] def api_method_endorse_event(article_id, user_id, photo_id): """model logic for photo endorse :param article_id: article local id :param user_id: user id :param photo_id: photo local id """ lm = LikeModel() like = lm.get_like(user_id, article_id, photo_id) if like: lm.delete_like(like) else: lm.insert_like(user_id, article_id, photo_id) def api_method_endorse_fetch(article_id, user_id, photo_id): """model logic for photo endorse fetch :param article_id: article local id :param user_id: user id :param photo_id: photo local id """ lm = LikeModel() like = lm.get_like(user_id, article_id, photo_id) res = 1 if like else 0 return res def api_method_exclude_event(article_id, user_id, photo_id): """model logic for photo exclude :param article_id: article local id :param user_id: user id :param photo_id: photo local id """ em = ExcludeModel() exclude = em.get_exclude(user_id, article_id, photo_id) if exclude: em.delete_exclude(exclude) else: em.insert_exclude(user_id, article_id, photo_id) def api_method_exclude_fetch(article_id, user_id, photo_id): """model logic for photo exclude fetch :param article_id: article local id :param user_id: user id :param photo_id: photo local id """ em = ExcludeModel() exclude = em.get_exclude(user_id, article_id, photo_id) res = 1 if exclude else 0 return res
Python
0
@@ -3759,28 +3759,612 @@ clude else 0%0A return res%0A +%0A%0Adef api_method_endorse_count(article_id, photo_id):%0A %22%22%22model logic for producing photo endorse count%0A%0A :param article_id: article local id%0A :param photo_id: photo local id%0A %22%22%22%0A lm = LikeModel()%0A return lm.get_likes_article_photo(article_id, photo_id, count=True)%0A%0A%0Adef api_method_exclude_count(article_id, photo_id):%0A %22%22%22model logic for producing photo exclude count%0A%0A :param article_id: article local id%0A :param photo_id: photo local id%0A %22%22%22%0A em = ExcludeModel()%0A return em.get_excludes_article_photo(article_id, photo_id, count=True)%0A
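A hedged usage sketch for the two new count helpers; that count=True makes the model methods return plain integers is taken from the diff, and the caller below is invented:

from flickipedia.web.rest import (
    api_method_endorse_count,
    api_method_exclude_count,
)

def photo_vote_counts(article_id, photo_id):
    # Each helper defers to its model with count=True, so the
    # return values are integers rather than row objects.
    return {
        'endorses': api_method_endorse_count(article_id, photo_id),
        'excludes': api_method_exclude_count(article_id, photo_id),
    }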
8700bcaabc2470849a47383c991c37a886da1b4a
Add profiler
corehq/apps/data_interfaces/dispatcher.py
corehq/apps/data_interfaces/dispatcher.py
from django.utils.decorators import method_decorator from corehq import privileges from corehq.apps.accounting.decorators import requires_privilege_with_fallback from corehq.apps.reports.dispatcher import ReportDispatcher, ProjectReportDispatcher, datespan_default from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import Permissions from django_prbac.exceptions import PermissionDenied from django_prbac.utils import ensure_request_has_privilege require_can_edit_data = require_permission(Permissions.edit_data) class DataInterfaceDispatcher(ProjectReportDispatcher): prefix = 'data_interface' map_name = 'DATA_INTERFACES' def dispatch(self, request, *args, **kwargs): from corehq.apps.reports.standard.export import DeidExportReport if kwargs['report_slug'] in [DeidExportReport.slug]: return self.deid_dispatch(request, *args, **kwargs) return super(DataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) @method_decorator(requires_privilege_with_fallback(privileges.DEIDENTIFIED_DATA)) def deid_dispatch(self, request, *args, **kwargs): return super(DataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) def permissions_check(self, report, request, domain=None, is_navigation_check=False): if is_navigation_check: from corehq.apps.reports.standard.export import DeidExportReport if report.split('.')[-1] in [DeidExportReport.__name__]: try: ensure_request_has_privilege(request, privileges.DEIDENTIFIED_DATA) except PermissionDenied: return False return super(DataInterfaceDispatcher, self).permissions_check(report, request, domain) class EditDataInterfaceDispatcher(ReportDispatcher): prefix = 'edit_data_interface' map_name = 'EDIT_DATA_INTERFACES' @method_decorator(require_can_edit_data) @datespan_default def dispatch(self, request, *args, **kwargs): from corehq.apps.importer.base import ImportCases if kwargs['report_slug'] in [ImportCases.slug]: return self.bulk_dispatch(request, *args, **kwargs) return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) @method_decorator(requires_privilege_with_fallback(privileges.BULK_CASE_MANAGEMENT)) def bulk_dispatch(self, request, *args, **kwargs): return super(EditDataInterfaceDispatcher, self).dispatch(request, *args, **kwargs) def permissions_check(self, report, request, domain=None, is_navigation_check=False): if is_navigation_check: from corehq.apps.importer.base import ImportCases if report.split('.')[-1] in [ImportCases.__name__]: try: ensure_request_has_privilege(request, privileges.BULK_CASE_MANAGEMENT) except PermissionDenied: return False return request.couch_user.can_edit_data(domain)
Python
0.000002
@@ -481,16 +481,63 @@ vilege%0A%0A +from dimagi.utils.decorators.profile import *%0A%0A require_ @@ -712,24 +712,87 @@ NTERFACES'%0A%0A + @profile(%22/home/sravfeyn/src/hotshot-logfiles/users.prof%22)%0A def disp
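The imported dimagi profile decorator is not shown in this record; a rough stand-in with the same call shape, built on the hotshot profiler that produces .prof log files (the real decorator may differ):

import functools
import hotshot  # Python 2 stdlib deterministic profiler

def profile(log_file):
    """Profile every call of the wrapped function into log_file."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            prof = hotshot.Profile(log_file)
            try:
                return prof.runcall(func, *args, **kwargs)
            finally:
                prof.close()
        return wrapper
    return decorator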
61e67ed5740148f74e67aef09afc65ef1c3fd6a8
Handle commands in a very trivial way
hackday_bot/bot.py
hackday_bot/bot.py
"""hackday_bot.bot module.""" import logging import time from prawcore.exceptions import PrawcoreException logger = logging.getLogger(__package__) class Bot(object): """Bot manages comments made to the specified subreddit.""" def __init__(self, subreddit): """Initialize an instance of Bot. :param subreddit: The subreddit to monitor for new comments. """ self.subreddit = subreddit def _handle_comment(self, comment): logger.info(comment) def run(self): """Run the bot indefinitely.""" running = True subreddit_url = '{}{}'.format(self.subreddit._reddit.config.reddit_url, self.subreddit.url) logger.info('Watching for comments on: {}'.format(subreddit_url)) while running: try: for comment in self.subreddit.stream.comments(): self._handle_comment(comment) except KeyboardInterrupt: logger.info('Termination received. Goodbye!') running = False except PrawcoreException: logger.exception('run loop') time.sleep(10) return 0
Python
0.00022
@@ -38,16 +38,26 @@ logging%0A +import re%0A import t @@ -112,16 +112,208 @@ eption%0A%0A +AVAILABLE_COMMANDS = ('help', 'interested', 'join', 'leave', 'uninterested')%0ACOMMAND_RE = re.compile(r'(?:%5CA%7C%5Cs)!(%7B%7D)(?=%5Cs%7C%5CZ)'%0A .format('%7C'.join(AVAILABLE_COMMANDS)))%0A%0A %0Alogger @@ -639,66 +639,897 @@ ef _ -handle_comment(self, comment):%0A logger.info(comment +command_help(self, comment):%0A comment.reply('help text will go here')%0A%0A def _command_interested(self, comment):%0A comment.reply('soon I will record your interest')%0A%0A def _command_join(self, comment):%0A comment.reply('soon I will record your sign up')%0A%0A def _command_leave(self, comment):%0A comment.reply('soon I will record your abdication')%0A%0A def _command_uninterested(self, comment):%0A comment.reply('soon I will record your uninterest')%0A%0A def _handle_comment(self, comment):%0A commands = set(COMMAND_RE.findall(comment.body))%0A if len(commands) %3E 1:%0A comment.reply('Please provide only a single command.')%0A elif len(commands) == 1:%0A command = commands.pop()%0A getattr(self, '_command_%7B%7D'.format(command))(comment)%0A logger.debug('Handled %7B%7D by %7B%7D'.format(command, comment.author) )%0A%0A
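Once .format() is applied, the added COMMAND_RE expands to (?:\A|\s)!(help|interested|join|leave|uninterested)(?=\s|\Z); a quick runnable check of what it does and does not match:

import re

AVAILABLE_COMMANDS = ('help', 'interested', 'join', 'leave', 'uninterested')
COMMAND_RE = re.compile(r'(?:\A|\s)!({})(?=\s|\Z)'
                        .format('|'.join(AVAILABLE_COMMANDS)))

assert COMMAND_RE.findall('!join') == ['join']
assert COMMAND_RE.findall('please !join and !help') == ['join', 'help']
assert COMMAND_RE.findall('mail!join') == []   # '!' must follow start/whitespace
assert COMMAND_RE.findall('!joining') == []    # command must end at whitespace/end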
8c519c3d91e7bb9acf7f2bfedbf97c7b2a911a14
Add host and port params to Emulator
anom/testing/emulator.py
anom/testing/emulator.py
import logging import os import re import signal import shlex import subprocess from queue import Empty, Queue from threading import Thread #: The command to run in order to start the emulator. _emulator_command = "gcloud beta emulators datastore start --consistency={consistency:0.2f} --no-store-on-disk" #: The regexp that is used to search for env vars in the emulator output. _env_var_re = re.compile(r"export ([^=]+)=(.+)") #: The string that is used to determine when the Emulator has finished starting up. _log_marker = "Dev App Server is now running" class Emulator: """Runs the Cloud Datastore emulator in a subprocess for testing purposes. Parameters: consistency(float): A value between 0.0 and 1.0 representing the percentage of datastore requests that should succeed. Example:: from anom.testing import Emulator @pytest.fixture(scope="session") def emulator(): emulator = Emulator() emulator.start(inject=True) yield emulator.stop() """ def __init__(self, *, consistency=1): self._emulator_command = shlex.split(_emulator_command.format( consistency=consistency )) self._logger = logging.getLogger("Emulator") self._proc = None self._queue = Queue() self._thread = Thread(target=self._run, daemon=True) def start(self, *, timeout=15, inject=False): """Start the emulator process and wait for it to initialize. Parameters: timeout(int): The maximum number of seconds to wait for the Emulator to start up. inject(bool): Whether or not to inject the emulator env vars into the current process. Returns: dict: A dictionary of env vars that can be used to access the Datastore emulator. """ try: self._thread.start() env_vars = self._queue.get(block=True, timeout=timeout) if inject: os.environ.update(env_vars) return env_vars except Empty: # pragma: no cover raise RuntimeError("Timed out while waiting for Emulator to start up.") def stop(self): """Stop the emulator process. Returns: int: The process return code or None if the process isn't currently running. """ if self._proc is not None: if self._proc.poll() is None: try: os.killpg(self._proc.pid, signal.SIGTERM) _, returncode = os.waitpid(self._proc.pid, 0) self._logger.debug("Emulator process exited with code %d.", returncode) return returncode except ChildProcessError: # pragma: no cover return self._proc.returncode return self._proc.returncode # pragma: no cover return None # pragma: no cover def _run(self): self._proc = subprocess.Popen( self._emulator_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid, ) env_vars = {} while self._proc.poll() is None: line = self._proc.stdout.readline().strip().decode("utf-8") self._logger.debug(line) match = _env_var_re.search(line) if match: name, value = match.groups() env_vars[name] = value # If no env vars were found this will eventually cause # `start` to time out which is what we want since running # tests w/o the env vars set up could prove dangerous. if _log_marker in line and env_vars: self._queue.put(env_vars)
Python
0
@@ -281,16 +281,42 @@ cy:0.2f%7D + --host-port=%7Bhost%7D:%7Bport%7D --no-st @@ -327,16 +327,24 @@ on-disk%22 + # noqa %0A%0A#: The @@ -701,24 +701,150 @@ Parameters:%0A + host(str): The host name the emulator should bind to.%0A port(int): The port on which the emulator should listen on.%0A consis @@ -1220,16 +1220,45 @@ self, *, + host=%22127.0.0.1%22, port=9898, consist @@ -1337,16 +1337,50 @@ format(%0A + host=host, port=port,%0A
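With the new parameters the emulator's bind address becomes configurable; a usage sketch (the defaults shown mirror the diff, and actually running it requires the gcloud SDK):

from anom.testing import Emulator

# host="127.0.0.1" and port=9898 are now the defaults; override the
# port to, for example, run two emulators side by side.
emulator = Emulator(host="127.0.0.1", port=9899)
env_vars = emulator.start(inject=True)  # injects the emulator env vars into os.environ
try:
    pass  # ... run tests against the emulator ...
finally:
    emulator.stop()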
c18e43769b50dff68defb6a8d55dec89824d3d55
Use PyQt5 instead of PySide
src/qt_display_video.py
src/qt_display_video.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # (C) 2016 Jean Nassar # Released under BSD version 4 """ A Qt display window. Displays a video feed, and can also show the drone state and connection problems in the status bar. """ import sys from threading import Lock from PySide import QtCore, QtGui import rospy from sensor_msgs.msg import Image from ardrone_autonomy.msg import Navdata from drone_state import DroneState CONNECTION_CHECK_PERIOD = 250 # ms GUI_UPDATE_PERIOD = 20 # ms class DroneVideoDisplay(QtGui.QMainWindow): state_messages = { DroneState.Emergency: "Emergency", DroneState.Inited: "Initialized", DroneState.Landed: "Landed", DroneState.Flying: "Flying", DroneState.Hovering: "Hovering", DroneState.Test: "Test", DroneState.TakingOff: "Taking Off", DroneState.GotoHover: "Going to Hover Mode", DroneState.Landing: "Landing", DroneState.Looping: "Looping" } msg_disconnected = "Disconnected" msg_unknown = "Unknown State" msg_status_template = "{state} (Battery: {battery:.0f})%" def __init__(self): super(DroneVideoDisplay, self).__init__() # Setup GUI - a label which fills the whole window and holds our image self.setWindowTitle(rospy.get_param("~window_name", "AR.Drone Video Feed")) self.image_box = QtGui.QLabel(self) self.setCentralWidget(self.image_box) rospy.Subscriber("/ardrone/navdata", Navdata, self.receive_navdata) rospy.Subscriber("image", Image, self.receive_image, queue_size=1) # Hold image frame received from drone for processing by GUI self.image = None self._image_lock = Lock() # Holds the status message to be displayed on the next GUI update self.msg_status_bar = "" # Tracks whether we have received data since the last connection check self._comm_since_timer = False # A timer to check whether we"re still connected self.timer_connection = QtCore.QTimer(self) self.timer_connection.timeout.connect(self.cbk_connection) self.timer_connection.start(CONNECTION_CHECK_PERIOD) # A timer to redraw the GUI self.timer_redraw = QtCore.QTimer(self) self.timer_redraw.timeout.connect(self.cbk_redraw) self.timer_redraw.start(GUI_UPDATE_PERIOD) @property def is_connected(self): return self._comm_since_timer def cbk_connection(self): """ Called every CONNECTION_CHECK_PERIOD. If we haven"t received anything since the last callback, we will assume that we are having network troubles and display a message in the status bar. """ self._comm_since_timer = False def cbk_redraw(self): if self.image is not None: with self._image_lock: # Convert the ROS image into a QImage which we can display image = QtGui.QPixmap.fromImage( QtGui.QImage(self.image.data, self.image.width, self.image.height, QtGui.QImage.Format_RGB888)) # Further processing can be done here. self.resize(image.width(), image.height()) self.image_box.setPixmap(image) # Update the status bar self.statusBar().showMessage( self.msg_status_bar if self.is_connected else self.msg_disconnected) def receive_image(self, data): self._comm_since_timer = True with self._image_lock: self.image = data # Save ros image for processing by display thread def receive_navdata(self, navdata): self._comm_since_timer = True # Update the message to be displayed state = self.state_messages.get(navdata.state, self.msg_unknown) self.msg_status_bar = self.msg_status_template.format( state=state, battery=navdata.batteryPercent, ) def main(): rospy.init_node("ardrone_video_display") app = QtGui.QApplication(sys.argv) display = DroneVideoDisplay() display.show() app.exec_() rospy.spin() if __name__ == '__main__': main()
Python
0
@@ -275,12 +275,11 @@ m Py -Side +Qt5 imp @@ -295,16 +295,27 @@ e, QtGui +, QtWidgets %0A%0Aimport @@ -527,19 +527,23 @@ splay(Qt -Gui +Widgets .QMainWi @@ -1429,19 +1429,23 @@ box = Qt -Gui +Widgets .QLabel( @@ -4117,19 +4117,23 @@ app = Qt -Gui +Widgets .QApplic
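The mechanical core of this port: in Qt 5 the widget classes moved out of QtGui into the new QtWidgets module, so both the import line and every widget base class change. A minimal sketch:

# PySide / Qt 4:
#   from PySide import QtCore, QtGui
#   window = QtGui.QMainWindow(); label = QtGui.QLabel(window)

# PyQt5 / Qt 5: widgets now come from QtWidgets
from PyQt5 import QtCore, QtGui, QtWidgets

app = QtWidgets.QApplication([])
window = QtWidgets.QMainWindow()
label = QtWidgets.QLabel(window)   # QImage/QPixmap remain in QtGui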
61cf4e2feb3d8920179e28719822c7fb34ea6550
Add defaults to the IBM RNG
3/ibm_rng.py
3/ibm_rng.py
def ibm_rng(x1, a, c, m): x = x1 while True: x = (a * x + c) % m yield x / (m-1) def main(): rng = ibm_rng(1, 65539, 0, 2**31) while True: x = next(rng) print(x) if __name__ == '__main__': main()
Python
0.000002
@@ -14,14 +14,28 @@ 1, a +=65539 , c +=0 , m +=2**31 ):%0A
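ibm_rng is a linear congruential generator, x_{n+1} = (a*x_n + c) mod m, and the new defaults a=65539, c=0, m=2**31 are the parameters of IBM's infamous RANDU. A hand-checked first draw, assuming Python 3 true division (which the x / (m-1) line relies on):

rng = ibm_rng(1)                  # same as ibm_rng(1, 65539, 0, 2**31)
first = next(rng)
# x1 = (65539 * 1 + 0) % 2**31 = 65539
# yielded: 65539 / (2**31 - 1), about 3.0518e-05
assert abs(first - 65539 / (2**31 - 1)) < 1e-15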
776c2992b64911f86740cdf0af4f05c7587430c7
Bump version
hbmqtt/__init__.py
hbmqtt/__init__.py
# Copyright (c) 2015 Nicolas JOUANIN # # See the file license.txt for copying permission. VERSION = (0, 9, 4, 'final', 0)
Python
0
@@ -105,17 +105,17 @@ 9, -4, 'final +5, 'alpha ', 0
6cd42058f3a2054e10b58a1a27a45481e608a23b
Refactor require.deb.key() to avoid code duplication [#141]
fabtools/require/deb.py
fabtools/require/deb.py
""" Debian packages =============== This module provides high-level tools for managing Debian/Ubuntu packages and repositories. """ from __future__ import with_statement from fabric.api import hide, run, settings from fabric.utils import puts from fabtools.deb import ( add_apt_key, apt_key_exists, install, is_installed, uninstall, update_index, ) from fabtools.files import is_file, watch from fabtools.system import distrib_codename from fabtools.utils import run_as_root def key(keyid, filename=None, url=None, keyserver='subkeys.pgp.net', update=False): """ Require a PGP key for APT. :: from fabtools import require # Varnish signing key from URL require.deb.key('C4DEFFEB', url='http://repo.varnish-cache.org/debian/GPG-key.txt') # Nginx signing key from default key server (subkeys.pgp.net) require.deb.key('7BD9BF62') # From custom key server require.deb.key('7BD9BF62', keyserver='keyserver.ubuntu.com') # From file require.deb.key('7BD9BF62', filename='nginx.asc' """ # Command extracted from apt-key source gpg_cmd = 'gpg --ignore-time-conflict --no-options --no-default-keyring --keyring /etc/apt/trusted.gpg' with settings(hide('everything'), warn_only=True): key_exists = not run('%(gpg_cmd)s --fingerprint %(keyid)s' % locals()).return_code if not key_exists: add_apt_key(keyid=keyid, filename=filename, url=url, keyserver=keyserver, update=update) def source(name, uri, distribution, *components): """ Require a package source. :: from fabtools import require # Official MongoDB packages require.deb.source('mongodb', 'http://downloads-distro.mongodb.org/repo/ubuntu-upstart', 'dist', '10gen') """ from fabtools.require import file as require_file path = '/etc/apt/sources.list.d/%(name)s.list' % locals() components = ' '.join(components) source_line = 'deb %(uri)s %(distribution)s %(components)s\n' % locals() with watch(path) as config: require_file(path=path, contents=source_line, use_sudo=True) if config.changed: puts('Added APT repository: %s' % source_line) update_index() def ppa(name): """ Require a `PPA`_ package source. Example:: from fabtools import require # Node.js packages by Chris Lea require.deb.ppa('ppa:chris-lea/node.js') .. _PPA: https://help.launchpad.net/Packaging/PPA """ assert name.startswith('ppa:') user, repo = name[4:].split('/', 2) distrib = distrib_codename() source = '%(user)s-%(repo)s-%(distrib)s.list' % locals() if not is_file(source): package('python-software-properties') run_as_root('add-apt-repository %s' % name, pty=False) update_index() def package(pkg_name, update=False, version=None): """ Require a deb package to be installed. Example:: from fabtools import require # Require a package require.deb.package('foo') # Require a specific version require.deb.package('firefox', version='11.0+build1-0ubuntu4') """ if not is_installed(pkg_name): install(pkg_name, update=update, version=version) def packages(pkg_list, update=False): """ Require several deb packages to be installed. Example:: from fabtools import require require.deb.packages([ 'foo', 'bar', 'baz', ]) """ pkg_list = [pkg for pkg in pkg_list if not is_installed(pkg)] if pkg_list: install(pkg_list, update) def nopackage(pkg_name): """ Require a deb package to be uninstalled. Example:: from fabtools import require require.deb.nopackage('apache2') """ if is_installed(pkg_name): uninstall(pkg_name) def nopackages(pkg_list): """ Require several deb packages to be uninstalled. 
Example:: from fabtools import require require.deb.nopackages([ 'perl', 'php5', 'ruby', ]) """ pkg_list = [pkg for pkg in pkg_list if is_installed(pkg)] if pkg_list: uninstall(pkg_list)
Python
0
@@ -170,51 +170,8 @@ nt%0A%0A -from fabric.api import hide, run, settings%0A from @@ -1057,328 +1057,41 @@ %22%22%0A%0A - # Command extracted from apt-key source%0A gpg_cmd = 'gpg --ignore-time-conflict --no-options --no-default-keyring --keyring /etc/apt/trusted.gpg'%0A%0A with settings(hide('everything'), warn_only=True):%0A key_exists = not run('%25(gpg_cmd)s --fingerprint %25(keyid)s' %25 locals()).return_code%0A if not key_exists +%0A if not apt_key_exists(keyid) :%0A
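apt_key_exists replaces the deleted inline gpg probe; its body is not shown in this record, but from the removed code it plausibly looks something like the sketch below (the actual fabtools implementation may differ):

from fabric.api import hide, run, settings

def apt_key_exists(keyid):
    """Check whether keyid is in APT's trusted keyring."""
    # Same command the deleted inline check used (extracted from apt-key).
    gpg_cmd = ('gpg --ignore-time-conflict --no-options '
               '--no-default-keyring --keyring /etc/apt/trusted.gpg')
    with settings(hide('everything'), warn_only=True):
        res = run('%s --fingerprint %s' % (gpg_cmd, keyid))
    return res.return_code == 0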
a7bc77727f972b69052079f613a781c76de8bd29
Fix PEP 8 violations
frigg/helpers/github.py
frigg/helpers/github.py
# -*- coding: utf8 -*- import json import requests from django.conf import settings def get_pull_request_url(build): if build.pull_request_id > 0: return 'https://github.com/%s/%s/pull/%s' % (build.project.owner, build.project.name, build.pull_request_id) return 'https://github.com/%s/%s' % (build.project.owner, build.project.name) def get_commit_url(build): return 'https://github.com/%s/%s/commit/%s/' % ( build.project.owner, build.project.name, build.sha ) def list_collaborators(project): url = 'repos/%s/%s/collaborators' % (project.owner, project.name) data = json.loads(api_request(url, project.github_token).text) return [collaborator['login'] for collaborator in data] def set_commit_status(build, pending=False, error=None, context='frigg'): if settings.DEBUG or getattr(settings, 'STAGING', False): return url = "repos/%s/%s/statuses/%s" % (build.project.owner, build.project.name, build.sha) if context == 'frigg': status, description = _get_status_from_build(build, pending, error) target_url = build.get_absolute_url() elif context == 'frigg-preview': status, description = _get_status_from_deployment(build, pending, error) target_url = build.deployment.get_deployment_url() else: raise RuntimeError('Unknown context') return api_request(url, build.project.github_token, { 'state': status, 'target_url': target_url, 'description': description, 'context': 'continuous-integration/{0}'.format(context) }) def update_repo_permissions(user): from frigg.builds.models import Project repos = list_user_repos(user) for org in list_organization(user): repos += list_organization_repos(user.github_token, org['login']) for repo in repos: try: project = Project.objects.get(owner=repo['owner']['login'], name=repo['name']) project.members.add(user) except Project.DoesNotExist: pass def list_user_repos(user): page = 1 output = [] response = api_request('user/repos', user.github_token) output += json.loads(response.text) while response.headers.get('link') and 'next' in response.headers.get('link'): page += 1 response = api_request('user/repos', user.github_token, page=page) output += json.loads(response.text) return output def list_organization(user): return json.loads(api_request('user/orgs', user.github_token).text) def list_organization_repos(token, org): page = 1 output = [] response = api_request('orgs/%s/repos' % org, token) output += json.loads(response.text) while response.headers.get('link') and 'next' in response.headers.get('link'): page += 1 response = api_request('orgs/%s/repos' % org, token, page=page) output += json.loads(response.text) return output def _get_status_from_build(build, pending, error): if pending: status = 'pending' description = "Frigg started the build." else: if error is None: description = 'The build finished.' if build.result.succeeded: status = 'success' else: status = 'failure' else: status = 'error' description = "The build errored: %s" % error return status, description def _get_status_from_deployment(build, pending, error): if pending: status = 'pending' description = 'Frigg started to deploy the preview.' 
else: if error is None: description = 'Preview is deployed to {0}.'.format(build.deployment.get_deployment_url()) if build.deployment.succeeded: status = 'success' else: status = 'failure' else: status = 'error' description = "The preview deployment errored: %s" % error return status, description def api_request(url, token, data=None, page=None): url = "https://api.github.com/%s?access_token=%s" % (url, token) if page: url += '&page=%s' % page if data is None: response = requests.get(url) else: headers = { 'Content-type': 'application/json', 'Accept': 'application/vnd.github.she-hulk-preview+json' } response = requests.post(url, data=json.dumps(data), headers=headers) if settings.DEBUG: print((response.headers.get('X-RateLimit-Remaining'))) return response
Python
0.000001
@@ -3750,16 +3750,33 @@ .format( +%0A build.de @@ -3804,16 +3804,29 @@ nt_url() +%0A )%0A
0202eeed429149cbfafd53d9ba6281a0926ea9df
Add labels to account forms and add a NewUserWithPasswordForm that adds password inputs to the new user form.
froide/account/forms.py
froide/account/forms.py
from django import forms from django.utils.translation import ugettext as _ from django.utils.safestring import mark_safe from django.core.urlresolvers import reverse from django.contrib.auth.models import User from helper.widgets import EmailInput class NewUserForm(forms.Form): first_name = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'placeholder': _('First Name'), 'class': 'inline'})) last_name = forms.CharField(max_length=30, widget=forms.TextInput( attrs={'placeholder': _('Last Name'), 'class': 'inline'})) user_email = forms.EmailField(widget=EmailInput( attrs={'placeholder': _('[email protected]')})) def clean_first_name(self): return self.cleaned_data['first_name'].strip() def clean_last_name(self): return self.cleaned_data['last_name'].strip() def clean_user_email(self): email = self.cleaned_data['user_email'] try: User.objects.get(email=email) except User.DoesNotExist: pass else: raise forms.ValidationError(mark_safe( _('This email address already has an account. <a href="%s?simple" class="target-small">Please login using that email address.</a>') % reverse("account-login"))) return email class UserLoginForm(forms.Form): email = forms.EmailField(widget=EmailInput( attrs={'placeholder': _('[email protected]')})) password = forms.CharField(widget=forms.PasswordInput)
Python
0
@@ -314,32 +314,79 @@ d(max_length=30, +%0A label=_('First name'),%0A widget=forms.Te @@ -423,32 +423,48 @@ _('First Name'), +%0A 'class': 'inlin @@ -515,16 +515,62 @@ ngth=30, +%0A label=_('Last name'),%0A widget= @@ -581,33 +581,24 @@ s.TextInput( -%0A attrs=%7B'plac @@ -622,16 +622,32 @@ Name'), +%0A 'class' @@ -686,32 +686,70 @@ orms.EmailField( +label=_('Email address'),%0A widget=EmailInpu @@ -746,33 +746,24 @@ =EmailInput( -%0A attrs=%7B'plac @@ -1426,16 +1426,489 @@ email%0A%0A +class NewUserWithPasswordForm(NewUserForm):%0A password = forms.CharField(widget=forms.PasswordInput,%0A label=_('Password'))%0A password2 = forms.CharField(widget=forms.PasswordInput,%0A label=_('Password (repeat)'))%0A%0A def clean(self):%0A cleaned = super(NewUserWithPasswordForm, self).clean()%0A if cleaned%5B'password'%5D != cleaned%5B'password2'%5D:%0A raise forms.ValidationError(_(%22Passwords do not match!%22))%0A return cleaned%0A%0A class Us @@ -2032,16 +2032,50 @@ .net')%7D) +,%0A label=_('Email address') )%0A pa @@ -2125,10 +2125,39 @@ ordInput +,%0A label=_('Password') )%0A
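A sketch of the mismatch check the new clean() adds; it assumes a configured Django project (the email-uniqueness check hits the database), and the field values are invented:

form = NewUserWithPasswordForm(data={
    'first_name': 'Ada',
    'last_name': 'Lovelace',
    'user_email': '[email protected]',
    'password': 'secret-one',
    'password2': 'secret-two',    # deliberate mismatch
})
assert not form.is_valid()
assert 'Passwords do not match!' in form.non_field_errors()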
2db84e6c94fdc8de821a98442ce928db9dd73441
Sponsored event should dump title
src/remotedb/dumpers.py
src/remotedb/dumpers.py
import collections import functools import urllib.parse from django.core.serializers.json import DjangoJSONEncoder SITE_PREFIX = 'https://tw.pycon.org/2016/media/' USER_DUMP_KEYS = [ 'bio', 'email', 'speaker_name', 'facebook_profile_url', 'github_id', 'twitter_id', ] PROPOSAL_DUMP_KEYS = SPONSORED_EVENT_DUMP_KEYS = [ 'abstract', 'category', 'detailed_description', 'language', 'python_level', 'recording_policy', 'slide_link', 'title', ] def dump_user(user): data = {key: getattr(user, key) for key in USER_DUMP_KEYS} if user.photo: data['photo_url'] = urllib.parse.urljoin(SITE_PREFIX, user.photo.url) return data def dump_proposal(proposal): data = {key: getattr(proposal, key) for key in PROPOSAL_DUMP_KEYS} data['speakers'] = [dump_user(info.user) for info in proposal.speakers] return data def dump_sponsored_event_detail(event): data = {key: getattr(event, key) for key in SPONSORED_EVENT_DUMP_KEYS} data['speakers'] = [dump_user(event.host)] return data json_encoder = DjangoJSONEncoder() def event_dumper(f): """Decorator to provide dumping of common event fields. """ @functools.wraps(f) def inner(obj): data = { 'begin_time': json_encoder.encode(obj.begin_time.value).strip('"'), 'end_time': json_encoder.encode(obj.end_time.value).strip('"'), 'location': obj.location, } data.update(f(obj)) return data return inner @event_dumper def dump_keynote_event(event): return { 'type': 'keynote', 'speakers': [event.speaker_name], } @event_dumper def dump_custom_event(event): return { 'type': 'custom', 'title': event.title, } @event_dumper def dump_sponsored_event(event): return { 'type': 'sponsored_talk', 'speakers': [event.host.speaker_name], 'detail_id': 'sponsored_{}'.format(event.pk) } @event_dumper def dump_proposed_talk_event(event): return { 'type': 'talk', 'title': event.proposal.title, 'speakers': [ speaker.user.speaker_name for speaker in event.proposal.speakers ], 'detail_id': str(event.proposal.pk), } EVENT_LOADERS = { 'keynoteevent': dump_keynote_event, 'customevent': dump_custom_event, 'sponsoredevent': dump_sponsored_event, 'proposedtalkevent': dump_proposed_talk_event, } def dump_schedule(event_iter): schedule_data_lists = collections.defaultdict(list) for event in event_iter: loader = EVENT_LOADERS[event._meta.model_name] data = loader(event) key = data['begin_time'].split('T', 1)[0] schedule_data_lists[key].append(data) for data_list in schedule_data_lists.values(): data_list.sort(key=lambda data: (data['begin_time'], data['location'])) return schedule_data_lists
Python
0.999635
@@ -1834,24 +1834,54 @@ ored_talk',%0A + 'title': event.title,%0A 'spe
487e1f1d07eb7f2bf16315432e48a3c70681da32
Fix warning
froide/helper/spam.py
froide/helper/spam.py
import logging from datetime import timedelta, datetime from django.core.cache import cache from django import forms from django.conf import settings from django.contrib.gis.geoip2 import GeoIP2 from django.utils.translation import gettext_lazy as _ from froide.helper.utils import get_client_ip logger = logging.getLogger(__name__) def suspicious_ip(request): target_countries = settings.FROIDE_CONFIG.get('target_countries', None) if target_countries is None: return False try: g = GeoIP2() ip = get_client_ip(request) if ip == '127.0.0.1': # Consider suspicious return True info = g.country(ip) if info['country_code'] not in target_countries: return True except Exception as e: logger.exception(e) return False def too_many_actions(request, action, threshold=3, increment=False): ip_address = get_client_ip(request) cache_key = 'fds:limit_action:%s:%s' % (action, ip_address) count = cache.get(cache_key, 0) if increment: if count == 0: cache.set(cache_key, 1, timeout=60 * 60) else: try: cache.incr(cache_key) except ValueError: pass return count > threshold class HoneypotField(forms.CharField): is_honeypot = True class SpamProtectionMixin: ''' Mixin that can triggers spam checking on forms ''' SPAM_PROTECTION = {} def __init__(self, *args, **kwargs): if not hasattr(self, 'request'): self.request = kwargs.pop('request', None) kwargs.pop('request', None) super().__init__(*args, **kwargs) self.fields['phone'] = HoneypotField( required=False, label=_('If you enter anything in this field ' 'your action will be blocked.'), widget=forms.TextInput( attrs={'required': True} ) ) if self._should_include_captcha(): self.fields['test'] = forms.CharField( label=_('What is three plus four?'), widget=forms.TextInput( attrs={'class': 'form-control'} ), required=True, help_text=_('Please answer this question to give evidence you are human.'), ) if self._should_include_timing(): self.fields['time'] = forms.FloatField( initial=datetime.utcnow().timestamp(), widget=forms.HiddenInput ) def _should_include_timing(self): if not settings.FROIDE_CONFIG.get('spam_protection', True): return False return self.SPAM_PROTECTION.get('timing', False) def _should_skip_spam_check(self): if not settings.FROIDE_CONFIG.get('spam_protection', True): return True return not self.request or self.request.user.is_authenticated def _should_include_captcha(self): if self._should_skip_spam_check(): return False if self.SPAM_PROTECTION.get('captcha') == 'always': return True if self.SPAM_PROTECTION.get('captcha') == 'ip' and self.request: return suspicious_ip(self.request) if self._too_man_actions(increment=False): return True return False def clean_phone(self): """Check that nothing's been entered into the honeypot.""" value = self.cleaned_data["phone"] if value: raise forms.ValidationError(self.fields["phone"].label) return value def clean_test(self): t = self.cleaned_data['test'] if t.lower().strip() not in ('7', str(_('seven'))): raise forms.ValidationError(_('Failed.')) return t def clean_time(self): value = self.cleaned_data["time"] since = datetime.utcnow() - datetime.fromtimestamp(value) if since < timedelta(seconds=15): raise forms.ValidationError( _('You filled this form out too quickly.') ) return value def _too_man_actions(self, increment=False): if not self.request: return False action = self.SPAM_PROTECTION.get('action') if not action: return False return too_many_actions( self.request, action, threshold=self.SPAM_PROTECTION.get('action_limit', 3), increment=increment ) def clean(self): super().clean() if 
self._should_skip_spam_check(): return too_many = self._too_man_actions(increment=True) should_block = self.SPAM_PROTECTION.get('action_block', False) if too_many and should_block and not self.cleaned_data.get('test'): raise forms.ValidationError(_('Too many actions.'))
Python
0.000008
@@ -800,17 +800,15 @@ ger. -exception +warning (e)%0A
846adf9c0b96ab18367258a19fd2c15a9cef7473
Add fallback
src/sentry_slack/plugin.py
src/sentry_slack/plugin.py
""" sentry_slack.plugin ~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2015 by Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import operator import sentry_slack from django import forms from django.db.models import Q from sentry import http from sentry.models import TagKey, TagValue from sentry.plugins.bases import notify from sentry.utils import json import urllib LEVEL_TO_COLOR = { 'debug': 'cfd3da', 'info': '2788ce', 'warning': 'f18500', 'error': 'f43f20', 'fatal': 'd20f2a', } class SlackOptionsForm(notify.NotificationConfigurationForm): webhook = forms.URLField( help_text='Your custom Slack webhook URL', widget=forms.TextInput(attrs={'class': 'span8'})) include_tags = forms.BooleanField( help_text='Include tags with notifications', required=False, ) include_rules = forms.BooleanField( help_text='Include triggering rules with notifications', required=False, ) class SlackPlugin(notify.NotificationPlugin): author = 'Sentry Team' author_url = 'https://github.com/getsentry' resource_links = ( ('Bug Tracker', 'https://github.com/getsentry/sentry-slack/issues'), ('Source', 'https://github.com/getsentry/sentry-slack'), ) title = 'Slack' slug = 'slack' description = 'Post notifications to a Slack channel.' conf_key = 'slack' version = sentry_slack.VERSION project_conf_form = SlackOptionsForm def is_configured(self, project): return all((self.get_option(k, project) for k in ('webhook',))) def color_for_group(self, group): return '#' + LEVEL_TO_COLOR.get(group.get_level_display(), 'error') def get_tags(self, event): # TODO(dcramer): we want this behavior to be more accessible in sentry core tag_list = event.get_tags() if not tag_list: return () key_labels = { o.key: o.get_label() for o in TagKey.objects.filter( project=event.project, key__in=[t[0] for t in tag_list], ) } value_labels = { (o.key, o.value): o.get_label() for o in TagValue.objects.filter( reduce(operator.or_, (Q(key=k, value=v) for k, v in tag_list)), project=event.project, ) } return ( (key_labels.get(k, k), value_labels.get((k, v), v)) for k, v in tag_list ) def notify(self, notification): event = notification.event group = event.group project = group.project if not self.is_configured(project): return webhook = self.get_option('webhook', project) team = event.team title = group.message_short.encode('utf-8') culprit = group.culprit.encode('utf-8') fields = [] # They can be the same if there is no culprit # So we set culprit to an empty string instead of duplicating the text if title != culprit: fields.append({ 'title': 'Culprit', 'value': culprit, 'short': False, }) fields.append({ 'title': 'Project', 'value': '%s / %s' % ( team.name.encode('utf-8'), project.name.encode('utf-8'), ), 'short': True, }) if self.get_option('include_rules', project): rules = [] for rule in notification.rules: rule_link = reverse('sentry-edit-project-rule', args=[ group.organization.slug, project.slug, rule.id ]) rules.append((rule_link, rule.label.encode('utf-8'))) if rules: fields.append({ 'title': 'Triggered By', 'value': ', '.join('<%s | %s>' % r for r in rules), 'short': False, }) if self.get_option('include_tags', project): for tag_key, tag_value in self.get_tags(event): fields.append({ 'title': tag_key.encode('utf-8'), 'value': tag_value.encode('utf-8'), 'short': True, }) payload = { 'parse': 'none', 'attachments': [{ 'title': title, 'title_link': group.get_absolute_url(), 'color': self.color_for_group(group), 'fields': fields, }] } values = {'payload': json.dumps(payload)} return http.safe_urlopen(webhook, 
method='POST', data=values)
Python
0.000004
@@ -999,16 +999,17 @@ %0A )%0A%0A +%0A class Sl @@ -4418,16 +4418,51 @@ ts': %5B%7B%0A + 'fallback': title,%0A
7618839b4fb3d52f1ac083f2ead48eb5f2f4a00c
Fix accidental tab
mk2/plugins/shutdown.py
mk2/plugins/shutdown.py
from mk2.plugins import Plugin from mk2.events import Hook, ServerStop, StatPlayers, StatPlayerCount class Shutdown(Plugin): restart_warn_message = Plugin.Property(default="WARNING: planned restart in {delay}.") stop_warn_message = Plugin.Property(default="WARNING: server going down for planned maintainence in {delay}.") restart_message = Plugin.Property(default="Server restarting.") stop_message = Plugin.Property(default="Server going down for maintainence.") restart_cancel_message = Plugin.Property(default="WARNING: planned restart cancelled.") restart_cancel_reason = Plugin.Property(default="WARNING: planned restart cancelled ({reason}).") stop_cancel_message = Plugin.Property(default="WARNING: planned maintenance cancelled.") stop_cancel_reason = Plugin.Property(default="WARNING: planned maintenance cancelled ({reason}).") alert_command = Plugin.Property(default="say %s") kick_command = Plugin.Property(default="kick {player} {message}") kick_mode = Plugin.Property(default="all") failsafe = None cancel_preempt = 0 restart_on_empty = False restore = ('cancel_preempt', 'cancel', 'restart_on_empty') def setup(self): self.players = [] self.cancel = [] self.register(self.handle_players, StatPlayers) self.register(self.handle_player_count, StatPlayerCount) self.register(self.h_stop, Hook, public=True, name="stop", doc='cleanly stop the server. specify a delay like `~stop 2m`') self.register(self.h_restart, Hook, public=True, name="restart", doc='cleanly restart the server. specify a delay like `~restart 30s`') self.register(self.h_restart_empty, Hook, public=True, name="restart-empty",doc='restart the server next time it has 0 players') self.register(self.h_kill, Hook, public=True, name="kill", doc='kill the server') self.register(self.h_kill_restart, Hook, public=True, name="kill-restart", doc='kill the server and bring it back up') self.register(self.h_cancel, Hook, public=True, name="cancel", doc='cancel an upcoming shutdown or restart') def server_started(self, event): self.restart_on_empty = False self.cancel_preempt = 0 def warn_restart(self, delay): self.send_format(self.alert_command % self.restart_warn_message, delay=delay) def warn_stop(self, delay): self.send_format(self.alert_command % self.stop_warn_message, delay=delay) def warn_cancel(self, reason, thing): if reason: message = self.restart_cancel_reason if thing == "restart" else self.stop_cancel_reason else: message = self.restart_cancel_message if thing == "restart" else self.stop_cancel_message self.send_format(self.alert_command % message, reason=reason) def nice_stop(self, respawn, kill): if not kill: message = self.restart_message if respawn else self.stop_message if self.kick_mode == 'all': for player in self.players: self.send_format(self.kick_command, player=player, message=message) elif self.kick_mode == 'once': self.send_format(self.kick_command, message=message) self.dispatch(ServerStop(reason='console', respawn=respawn, kill=kill)) def handle_players(self, event): self.players = event.players def handle_player_count(self, event): if event.players_current == 0 and self.restart_on_empty: self.restart_on_empty = False self.nice_stop(True, False) def cancel_something(self, reason=None): thing, cancel = self.cancel.pop(0) cancel(reason, thing) def should_cancel(self): if self.cancel_preempt: self.cancel_preempt -= 1 return True else: return False #Hook handlers: def h_stop(self, event=None): if self.should_cancel(): self.console("I'm not stopping because this shutdown was cancelled with ~cancel") return action = lambda: 
self.nice_stop(False, False) if event and event.args: warn_length, action, cancel = self.action_chain_cancellable(event.args, self.warn_stop, action, self.warn_cancel) self.cancel.append(("stop", cancel)) action() def h_restart(self, event=None): if self.should_cancel(): self.console("I'm not restarting because this shutdown was cancelled with ~cancel") return action = lambda: self.nice_stop(True, False) if event and event.args: warn_length, action, cancel = self.action_chain_cancellable(event.args, self.warn_restart, action, self.warn_cancel) self.cancel.append(("restart", cancel)) action() def h_restart_empty(self, event): if self.restart_on_empty: self.console("I was already going to do that") else: self.console("I will restart the next time the server empties") self.restart_on_empty = True def h_kill(self, event): self.nice_stop(False, True) def h_kill_restart(self, event): self.nice_stop(True, True) def h_cancel(self, event): if self.cancel: self.cancel_something(event.args or None) else: self.cancel_preempt += 1 self.console("I will cancel the next thing")
Python
0.000035
@@ -902,9 +902,12 @@ .%22)%0A -%09 + aler
468ca16319977628bdae2be527514a30b35dd6fa
Fix a misplaced parenthesis.
django/core/management/commands/syncdb.py
django/core/management/commands/syncdb.py
from django.core.management.base import NoArgsCommand from django.core.management.color import no_style from django.utils.importlib import import_module from optparse import make_option import sys try: set except NameError: from sets import Set as set # Python 2.3 fallback class Command(NoArgsCommand): option_list = NoArgsCommand.option_list + ( make_option('--noinput', action='store_false', dest='interactive', default=True, help='Tells Django to NOT prompt the user for input of any kind.'), ) help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created." def handle_noargs(self, **options): from django.db import connection, transaction, models from django.conf import settings from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal verbosity = int(options.get('verbosity', 1)) interactive = options.get('interactive') show_traceback = options.get('traceback', False) self.style = no_style() # Import the 'management' module within each installed app, to register # dispatcher events. for app_name in settings.INSTALLED_APPS: try: import_module('.management', app_name) except ImportError, exc: # This is slightly hackish. We want to ignore ImportErrors # if the "management" module itself is missing -- but we don't # want to ignore the exception if the management module exists # but raises an ImportError for some reason. The only way we # can do this is to check the text of the exception. Note that # we're a bit broad in how we check the text, because different # Python implementations may not use the same text. # CPython uses the text "No module named management" # PyPy uses "No module named myproject.myapp.management" msg = exc.args[0] if not msg.startswith('No module named') or 'management' not in msg: raise cursor = connection.cursor() # Get a list of already installed *models* so that references work right. tables = connection.introspection.table_names() seen_models = connection.introspection.installed_models(tables) created_models = set() pending_references = {} # Create the tables for each model for app in models.get_apps(): app_name = app.__name__.split('.')[-2] model_list = models.get_models(app, include_auto_created=True) for model in model_list: # Create the model's database table, if it doesn't already exist. if verbosity >= 2: print "Processing %s.%s model" % (app_name, model._meta.object_name) opts = model._meta if (connection.introspection.table_name_converter(opts.db_table) in tables or (opts.auto_created and connection.introspection.table_name_converter(opts.auto_created._meta.db_table in tables))): continue sql, references = connection.creation.sql_create_model(model, self.style, seen_models) seen_models.add(model) created_models.add(model) for refto, refs in references.items(): pending_references.setdefault(refto, []).extend(refs) if refto in seen_models: sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references)) sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references)) if verbosity >= 1 and sql: print "Creating table %s" % model._meta.db_table for statement in sql: cursor.execute(statement) tables.append(connection.introspection.table_name_converter(model._meta.db_table)) transaction.commit_unless_managed() # Send the post_syncdb signal, so individual apps can do whatever they need # to do at this point. emit_post_sync_signal(created_models, verbosity, interactive) # The connection may have been closed by a syncdb handler. 
cursor = connection.cursor() # Install custom SQL for the app (but only if this # is a model we've just created) for app in models.get_apps(): app_name = app.__name__.split('.')[-2] for model in models.get_models(app): if model in created_models: custom_sql = custom_sql_for_model(model, self.style) if custom_sql: if verbosity >= 1: print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name) try: for sql in custom_sql: cursor.execute(sql) except Exception, e: sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \ (app_name, model._meta.object_name, e)) if show_traceback: import traceback traceback.print_exc() transaction.rollback_unless_managed() else: transaction.commit_unless_managed() else: if verbosity >= 2: print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name) # Install SQL indicies for all newly created models for app in models.get_apps(): app_name = app.__name__.split('.')[-2] for model in models.get_models(app): if model in created_models: index_sql = connection.creation.sql_indexes_for_model(model, self.style) if index_sql: if verbosity >= 1: print "Installing index for %s.%s model" % (app_name, model._meta.object_name) try: for sql in index_sql: cursor.execute(sql) except Exception, e: sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \ (app_name, model._meta.object_name, e)) transaction.rollback_unless_managed() else: transaction.commit_unless_managed() # Install the 'initial_data' fixture, using format discovery from django.core.management import call_command call_command('loaddata', 'initial_data', verbosity=verbosity)
Python
0.999999
@@ -3199,16 +3199,17 @@ db_table +) in tabl @@ -3212,17 +3212,16 @@ tables)) -) :%0A
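Why one parenthesis matters here: before the fix, the membership test ran first and its boolean was fed to table_name_converter; after, the table name is converted and then tested. A tiny demonstration with a stand-in converter:

def table_name_converter(name):
    # stand-in for connection.introspection.table_name_converter
    return str(name).lower()

tables = ['app_model']
db_table = 'APP_MODEL'

buggy = table_name_converter(db_table in tables)   # converts False -> 'false'
fixed = table_name_converter(db_table) in tables   # 'app_model' in tables

assert buggy == 'false'
assert fixed is True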
3c4e65f123dc56255262e38a934b9cacd03c0bfe
Remove debug prints
django_babel/management/commands/babel.py
django_babel/management/commands/babel.py
#-*- coding: utf-8 -*- import os from distutils.dist import Distribution from optparse import make_option from subprocess import call from django.core.management.base import LabelCommand, CommandError from django.conf import settings class Command(LabelCommand): args = '[makemessages] [compilemessages]' option_list = LabelCommand.option_list + ( make_option('--locale', '-l', default=None, dest='locale', action='append', help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). ' 'Can be used multiple times.'), make_option('--domain', '-d', default='django', dest='domain', help='The domain of the message files (default: "django").'), make_option('--mapping-file', '-F', default=None, dest='mapping_file', help='Mapping file') ) def handle_label(self, command, **options): if command not in ('makemessages', 'compilemessages'): raise CommandError("You must either apply 'makemessages' or 'compilemessages'") if command == 'makemessages': self.handle_makemessages(**options) if command == 'compilemessages': self.handle_compilemessages(**options) def handle_makemessages(self, **options): locale_paths = list(settings.LOCALE_PATHS) domain = options.pop('domain') locales = options.pop('locale') # support for mapping file specification via setup.cfg # TODO: Try to support all possible options. distribution = Distribution() distribution.parse_config_files(distribution.find_config_files()) mapping_file = options.pop('mapping_file', None) if mapping_file is None and 'extract_messages' in distribution.command_options: opts = distribution.command_options['extract_messages'] try: mapping_file = opts.get('mapping_file', ())[1] except IndexError: mapping_file = None for path in locale_paths: potfile = os.path.join(path, '%s.pot' % domain) if not os.path.exists(potfile): continue cmd = ['pybabel', 'extract', '-o', os.path.join(path, '%s.pot' % domain)] if mapping_file is not None: cmd.extend(['-F', mapping_file]) cmd.append(os.path.dirname(path)) print cmd call(cmd) for locale in locales: cmd = ['pybabel', 'update', '-D', domain, '-i', os.path.join(path, '%s.pot' % domain), '-d', path, '-l', locale] print cmd call(cmd) def handle_compilemessages(self, **options): locale_paths = list(settings.LOCALE_PATHS) domain = options.pop('domain') locales = options.pop('locale') for path in locale_paths: for locale in locales: po_file = os.path.join(path, locale, 'LC_MESSAGES', domain + '.po') if os.path.exists(po_file): cmd = ['pybabel', 'compile', '-D', domain, '-d', path, '-l', locale] call(cmd)
Python
0.000001
@@ -2404,38 +2404,16 @@ path))%0A%0A - print cmd%0A @@ -2661,34 +2661,8 @@ le%5D%0A - print cmd%0A
294254aad0d798ffcfca6e34b48b4ed704bb5cd0
Simplify CachingManager logic
django_prices_openexchangerates/models.py
django_prices_openexchangerates/models.py
from __future__ import unicode_literals from decimal import Decimal from django.conf import settings from django.core.exceptions import ValidationError from django.core.cache import cache from django.db import models from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import python_2_unicode_compatible from .currencies import CURRENCIES BASE_CURRENCY = getattr(settings, 'OPENEXCHANGERATES_BASE_CURRENCY', 'USD') CACHE_KEY = getattr(settings, 'OPENEXCHANGERATES_CACHE_KEY', 'conversion_rates') CACHE_TIME = getattr(settings, 'OPENEXCHANGERATES_CACHE_TTL', 60*60) class CachingManager(models.Manager): def get_rate(self, to_currency): # noqa conversion_rates = cache.get(CACHE_KEY) update_cache = False if not conversion_rates: conversion_rates = {} update_cache = True if to_currency not in conversion_rates: rates = self.all() for rate in rates: conversion_rates[rate.to_currency] = rate try: rate = conversion_rates[to_currency] except KeyError: rate = self.get(to_currency=to_currency) conversion_rates[to_currency] = rate update_cache = True if update_cache: cache.set(CACHE_KEY, conversion_rates, CACHE_TIME) return rate @python_2_unicode_compatible class ConversionRate(models.Model): base_currency = BASE_CURRENCY to_currency = models.CharField( _('To'), max_length=3, db_index=True, choices=CURRENCIES.items(), unique=True) rate = models.DecimalField( _('Conversion rate'), max_digits=20, decimal_places=12) modified_at = models.DateTimeField(auto_now=True) objects = CachingManager() class Meta: ordering = ['to_currency'] def save(self, *args, **kwargs): # noqa """ Save the model instance but only on successful validation. """ self.full_clean() super(ConversionRate, self).save(*args, **kwargs) def clean(self): # noqa if self.rate <= Decimal(0): raise ValidationError('Conversion rate has to be positive') if self.base_currency == self.to_currency: raise ValidationError( 'Can\'t set a conversion rate for the same currency') super(ConversionRate, self).clean() def __str__(self): # noqa return '1 %s = %.04f %s' % (self.base_currency, self.rate, self.to_currency) def __repr__(self): # noqa return ( 'ConversionRate(pk=%r, base_currency=%r, to_currency=%r, rate=%r)' % ( self.pk, self.base_currency, self.to_currency, self.rate))
Python
0.000007
@@ -599,96 +599,27 @@ )%0A%0A%0A -class CachingManager(models.Manager):%0A%0A def get_rate(self, to_currency): # noqa%0A +def get_rates(qs):%0A @@ -662,41 +662,8 @@ EY)%0A - update_cache = False%0A @@ -687,36 +687,32 @@ _rates:%0A - conversion_rates @@ -719,210 +719,261 @@ = %7B -%7D%0A update_cache = True%0A%0A if to_currency not in conversion_rates:%0A rates = self.all()%0A for rate in rates:%0A conversion_rates%5Brate.to_currency%5D = rate +rate.to_currency: rate for rate in qs%7D%0A cache.set(CACHE_KEY, conversion_rates, CACHE_TIME)%0A return conversion_rates%0A%0A%0Aclass CachingManager(models.Manager):%0A%0A def get_rate(self, to_currency): # noqa%0A all_rates = get_rates(self.all()) %0A @@ -999,24 +999,17 @@ r -ate = conversion +eturn all _rat @@ -1065,62 +1065,16 @@ -rate = self.get(to_currency=to_currency)%0A c +msg = 'C onve @@ -1082,126 +1082,69 @@ sion -_rates%5Bto_currency%5D = rate%0A update_cache = True%0A if update_cache:%0A cache.set(CACHE_KEY, c +Rate for %25s does not exist' %25 to_currency%0A raise C onve @@ -1152,47 +1152,30 @@ sion -_rates, CACHE_TIME)%0A return rate +Rate.DoesNotExist(msg) %0A%0A%0A@
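The extracted get_rates() boils down to one dict comprehension plus a cache write; a cache-free sketch of that shape, using a namedtuple as a stand-in for queryset rows:

from collections import namedtuple

Rate = namedtuple('Rate', ['to_currency', 'rate'])

def build_rates(qs):
    # Same shape as the refactored get_rates(), minus the cache layer.
    return {rate.to_currency: rate for rate in qs}

rates = build_rates([Rate('EUR', '0.90'), Rate('GBP', '0.80')])
assert rates['EUR'].rate == '0.90'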
c2fedd9a34f01c5268e13caf48aa57cd80fca423
correct error message: 'No valid observations' -> 'No valid annotations'
pyanno/util.py
pyanno/util.py
# Copyright (c) 2011, Enthought, Ltd. # Authors: Pietro Berkes <[email protected]>, Andrey Rzhetsky, # Bob Carpenter # License: Modified BSD license (2-clause) import numpy as np from numpy import log from numpy.core import getlimits from scipy.special import gammaln import time import logging logger = logging.getLogger(__name__) MISSING_VALUE = -1 SMALLEST_FLOAT = getlimits.finfo(np.float).min class PyannoValueError(ValueError): """ValueError subclass raised by pyanno functions and methods. """ pass def random_categorical(distr, nsamples): """Return an array of samples from a categorical distribution.""" assert np.allclose(distr.sum(), 1., atol=1e-8) cumulative = distr.cumsum() return cumulative.searchsorted(np.random.random(nsamples)) def ninf_to_num(x): """Substitute -inf with smallest floating point number.""" is_neg_inf = np.isneginf(x) x[is_neg_inf] = SMALLEST_FLOAT return x def dirichlet_llhood(theta, alpha): """Compute the log likelihood of theta under Dirichlet(alpha).""" # substitute -inf with SMALLEST_FLOAT, so that 0*log(0) is 0 when necessary log_theta = ninf_to_num(log(theta)) #log_theta = np.nan_to_num(log_theta) return (gammaln(alpha.sum()) - (gammaln(alpha)).sum() + ((alpha - 1.) * log_theta).sum()) # TODO remove default condition when x[i] == 0. def normalize(x, dtype=float): """Returns a normalized distribution (sums to 1.0).""" x = np.asarray(x, dtype=dtype) z = x.sum() if z <= 0: x = np.ones_like(x) z = float(len(x)) return x / z def create_band_matrix(shape, diagonal_elements): diagonal_elements = np.asarray(diagonal_elements) def diag(i,j): x = np.absolute(i-j) x = np.minimum(diagonal_elements.shape[0]-1, x).astype(int) return diagonal_elements[x] return np.fromfunction(diag, shape) # TODO clean up and simplify and rename def compute_counts(annotations, nclasses): """Transform annotation data in counts format. At the moment, it is hard coded for 8 annotators, 3 annotators active at any time. Input: annotations -- Input data (integer array, nitems x 8) nclasses -- number of annotation values (# classes) Ouput: data -- data[i,j] is the number of times the combination of annotators number `j` voted according to pattern `i` (integer array, nclasses^3 x 8) """ index = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [0, 6, 7], [0, 1, 7]], int) m = annotations.shape[0] n = annotations.shape[1] annotations = np.asarray(annotations, dtype=int) assert n==8, 'Strange: ' + str(n) + 'annotator number !!!' 
# compute counts of 3-annotator patterns for 8 triplets of annotators data = np.zeros((nclasses ** 3, 8), dtype=int) # transform each triple of annotations into a code in base `nclasses` for i in range(m): ind = np.where(annotations[i, :] >= 0) code = annotations[i, ind[0][0]] * (nclasses ** 2) +\ annotations[i, ind[0][1]] * nclasses +\ annotations[i, ind[0][2]] # o = index of possible combination of annotators in the loop design o = -100 for j in range(8): k = 0 for l in range(3): if index[j, l] == ind[0][l]: k += 1 if k == 3: o = j if o >= 0: data[code, o] += 1 else: logger.debug(str(code) + " " + str(ind) + " = homeless code") return data def labels_count(annotations, nclasses, missing_val=MISSING_VALUE): """Compute the total count of labels in observed annotations.""" valid = annotations!=missing_val nobservations = valid.sum() if nobservations == 0: # no valid observations raise PyannoValueError('No valid observations') return np.bincount(annotations[valid], minlength=nclasses) def labels_frequency(annotations, nclasses, missing_val=MISSING_VALUE): """Compute the total frequency of labels in observed annotations.""" valid = annotations!=missing_val nobservations = valid.sum() if nobservations == 0: # no valid observations raise PyannoValueError('No valid observations') return (np.bincount(annotations[valid], minlength=nclasses) / float(nobservations)) def is_valid(annotations): """Return True if annotation is valid. An annotation is valid if it is not equal to the missing value, MISSING_VALUE. """ return annotations != MISSING_VALUE def majority_vote(annotations): """Compute an estimate of the real class by majority vote. In case of ties, return the class with smallest number. Parameters ---------- annotations : ndarray, shape = (n_items, n_annotators) annotations[i,j] is the annotation made by annotator j on item i Return ------ vote : ndarray, shape = (n_items, ) vote[i] is the majority vote estimate for item i """ nitems = annotations.shape[0] valid = is_valid(annotations) vote = np.empty((nitems,), dtype=int) for i in xrange(nitems): count = np.bincount(annotations[i,valid[i,:]]) vote[i] = count.argmax() return vote def string_wrap(st, mode): st = str(st) if mode == 1: st = "\033[1;29m" + st + "\033[0m" elif mode == 2: st = "\033[1;34m" + st + "\033[0m" elif mode == 3: st = "\033[1;44m" + st + "\033[0m" elif mode == 4: st = "\033[1;35m" + st + "\033[0m" elif mode == 5: st = "\033[1;33;44m" + st + "\033[0m" elif mode == 5: st = "\033[1;47;34m" + st + "\033[0m" else: st = st + ' ' return st class benchmark(object): def __init__(self,name): self.name = name def __enter__(self): print '---- start ----' self.start = time.time() def __exit__(self,ty,val,tb): end = time.time() print '---- stop ----' print("%s : %0.3f seconds" % (self.name, end-self.start)) return False def check_unchanged(func_new, func_old, *args, **kwargs): with benchmark('new'): res_new = func_new(*args, **kwargs) print 'New function returns:', res_new with benchmark('old'): res_old = func_old(*args, **kwargs) print 'Old function returns:', res_old return res_old
Python
0.000009
@@ -4127,38 +4127,37 @@ Error('No valid -observ +annot ations')%0D%0A%0D%0A @@ -4538,30 +4538,29 @@ r('No valid -observ +annot ations')%0D%0A%0D%0A
a21b9588002013c5efff895e63f29fe362110656
Spell checker: identify multiple positions of misspelled word - precision: 0.05457300369812355 - recall: 0.6653793967226803
src/righter/__init__.py
src/righter/__init__.py
""" Identifies common English writing mistakes """ import re import unicodedata from righter import dictionary from righter import utils def check_spelling(text): """ Check if a text has spelling errors. Return a list with objects: { "selection": <wrong-spelled-word>, "start": <position-of-the-first-character-in-string> } """ text = text.lower() text = utils.remove_punctuation(text) words = text.split() response = [] for word in words: if not dictionary.is_english_word(word) and\ not utils.contains_digit(word): item = { "selection": word, "start": text.find(word) } response.append(item) return response def check_capitalization(text): """ Check if a text has spelling errors. Return a list with objects: { "selection": <wrong-capitalized-word>, "start": <position-of-the-first-character-in-string> } """ response = [] sentences = re.split('[!?.]', text) # TODO: add \n pos = 0 for sentence in sentences: clean_sentence = sentence.strip() if not clean_sentence: continue # Check if first character is capital if clean_sentence[0].islower(): first_word = clean_sentence.split()[0] first_word_position = pos + sentence.find(first_word) item = { "selection": first_word, "start": first_word_position } response.append(item) else: # check if a common English word in the middle of the text is # wrongly capitalized words = clean_sentence.split() for word in words[1:]: if word[0].isupper() and\ dictionary.is_english_word(word.lower()): item = { "selection": word, "start": text.find(word) } response.append(item) pos += len(sentence) + 1 return response def check(text): changes = [] for change in check_capitalization(text): change['symbol'] = 'C' changes.append(change) for change in check_spelling(text): change['symbol'] = 'SP' changes.append(change) return changes
Python
0.999581
@@ -132,16 +132,319 @@ utils%0A%0A%0A +def findall(sub, string):%0A %22%22%22%0A %3E%3E%3E text = %22Allowed Hello Hollow%22%0A %3E%3E%3E tuple(findall('ll', text))%0A (1, 10, 16)%0A %22%22%22%0A index = 0 - len(sub)%0A try:%0A while True:%0A index = string.index(sub, index + len(sub))%0A yield index%0A except ValueError:%0A pass%0A%0A%0A def chec @@ -446,32 +446,41 @@ check_spelling( +original_ text):%0A %22%22%22%0A @@ -702,16 +702,25 @@ text = +original_ text.low @@ -928,16 +928,64 @@ (word):%0A + for pos in findall(word, text):%0A @@ -989,32 +989,36 @@ item = %7B%0A + @@ -1034,69 +1034,147 @@ n%22: -word,%0A %22start%22: text.find(word)%0A %7D%0A +original_text%5Bpos: (pos + len(word))%5D,%0A %22start%22: pos%0A %7D%0A if item not in response:%0A
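The patch introduces a small generator and rewrites the spelling loop to report every occurrence of a word instead of only the first text.find() hit. Decoded from the hunks above (indentation normalized), the new pieces are roughly:

def findall(sub, string):
    """
    >>> text = "Allowed Hello Hollow"
    >>> tuple(findall('ll', text))
    (1, 10, 16)
    """
    # start one match-length before 0 so the first index() call scans from 0
    index = 0 - len(sub)
    try:
        while True:
            index = string.index(sub, index + len(sub))
            yield index
    except ValueError:
        pass

and inside check_spelling, which now takes original_text and derives text = original_text.lower():

        for pos in findall(word, text):
            item = {
                # slice the original so the reported selection keeps its casing
                "selection": original_text[pos: (pos + len(word))],
                "start": pos
            }
            if item not in response:
                response.append(item)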
0cb57a8cd6aa00ee2692e019f78fc6301231d99d
fix bug: reuse the tap coordinates in the touch-release sendevents
pyautodroid.py
pyautodroid.py
# -*- coding: utf-8 -*- '''Android UI Testing Module for Nox App Player''' import os from subprocess import Popen, PIPE, call import cv2 ADB_PATH = 'C:/Program Files (x86)/Nox/bin/nox_adb.exe' SHARED_DIR = os.getenv('HOMEPATH')+'/Nox_share' LATEST_MATCH_LOC = [0, 0] def find_img(device, temp, threshold=0.97): file_id = device.replace(':', '_') img = cv2.imread(SHARED_DIR+'/Image/screen'+file_id+'.png', 1) template = cv2.imread(temp, 1) (h, w, d) = template.shape # Apply template Matching try: matches = cv2.matchTemplate(img,template,cv2.TM_CCORR_NORMED) min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matches) except: print('## OpenCV Error ##') return False if max_val > threshold: LATEST_MATCH_LOC[0] = int(max_loc[0] + w/2) LATEST_MATCH_LOC[1] = int(max_loc[1] + h/2) #print " ", temp, "= (", LATEST_MATCH_LOC, ")" #print " max_val", max_val return True else: return False def find_imgs(device, temp, maxLen=10, threshold=0.97): matchList = [] file_id = device.replace(':', '_') img = cv2.imread(SHARED_DIR+'/Image/screen'+file_id+'.png', 1) template = cv2.imread(temp, 1) (h, w, d) = template.shape # Apply template Matching matches = cv2.matchTemplate(img,template,cv2.TM_CCORR_NORMED) for y in range(matches.shape[0]): for x in range(matches.shape[1]): if matches[y][x] > threshold: flag = True for element in matchList: distance = (element[0]-x)**2 + (element[1]-y)**2 if 5**2 > distance: flag = False break if flag: matchList.append((x, y)) if len(matchList) >= maxLen: return map(lambda p: (int(p[0] + w/2), int(p[1] + h/2)), matchList) return map(lambda p: (int(p[0] + w/2), int(p[1] + h/2)), matchList) def tap(device, loc, duration=''): call(ADB_PATH+' -s '+device+' shell input tap '+str(loc[0])+' '+str(loc[1])+' '+str(duration)) return def swipe(device, src, dst, duration=500): call(ADB_PATH+' -s '+device+' shell input swipe '+str(src[0])+' '+str(src[1])+' '+str(dst[0])+' '+str(dst[1])+' '+str(duration)) return def send_tap_event(device, loc, event_num): call(ADB_PATH+' -s '+device+' shell "' +'sendevent /dev/input/event'+str(event_num)+' 1 330 1;' +'sendevent /dev/input/event'+str(event_num)+' 3 58 1;' +'sendevent /dev/input/event'+str(event_num)+' 3 53 '+str(loc[0])+';' +'sendevent /dev/input/event'+str(event_num)+' 3 54 '+str(loc[1])+';' +'sendevent /dev/input/event'+str(event_num)+' 0 2 0;' +'sendevent /dev/input/event'+str(event_num)+' 0 0 0;' +'sendevent /dev/input/event'+str(event_num)+' 0 2 0;' +'sendevent /dev/input/event'+str(event_num)+' 0 0 0;' +'sendevent /dev/input/event'+str(event_num)+' 1 330 0;' +'sendevent /dev/input/event'+str(event_num)+' 3 58 0;' +'sendevent /dev/input/event'+str(event_num)+' 3 53 4291588597;' +'sendevent /dev/input/event'+str(event_num)+' 3 54 4294258463;' +'sendevent /dev/input/event'+str(event_num)+' 0 2 0;' +'sendevent /dev/input/event'+str(event_num)+' 0 0 0;"' ) return def open_activity(device, url_scheme): call(ADB_PATH+' -s '+device+' shell am start '+url_scheme) return def pull(device, remote, local='.'): call(ADB_PATH+' -s '+device+' pull '+remote+' '+local) return def push(device, local, remote): call(ADB_PATH+' -s '+device+' push '+local+' '+remote) return def stop_app(device, package): call(ADB_PATH+' -s '+device+' shell am force-stop '+package) return def get_screen(device, path='/mnt/shared/Image/'): file_id = device.replace(':', '_') call(ADB_PATH+' -s '+device+' shell screencap -p '+path+'screen'+file_id+'.png') return def get_input_event_num(device): p = Popen([ADB_PATH, '-s', device, 'shell', 'getevent'], stdout=PIPE) line = "" while 
True: line = p.stdout.readline().strip() if "Android Input" in line.decode('utf-8'): break buf = line p.kill() return buf.decode('utf-8')[-1]
Python
0
@@ -3226,18 +3226,23 @@ 53 -4291588597 +'+str(loc%5B0%5D)+' ;'%0D%0A @@ -3305,18 +3305,23 @@ 54 -4294258463 +'+str(loc%5B1%5D)+' ;'%0D%0A
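Decoded, the change replaces two hardcoded coordinate constants (4291588597 and 4294258463) in the release half of send_tap_event's long concatenated shell string with the requested tap position, so the touch is released where it was pressed:

            # release at the same position that was pressed, instead of
            # the old hardcoded constants
            +'sendevent /dev/input/event'+str(event_num)+' 3 53 '+str(loc[0])+';'
            +'sendevent /dev/input/event'+str(event_num)+' 3 54 '+str(loc[1])+';'

These two lines are fragments of the existing string-concatenation chain shown in old_contents, not standalone statements.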
2a5fbcd2e3da01150c2690c145100270d3f0ec81
fix clipnorm
model/lang_model_sgd.py
model/lang_model_sgd.py
import copy import numpy as np import tensorflow as tf from keras import backend as K from keras.optimizers import Optimizer from keras.callbacks import LearningRateScheduler from model.setting import Setting class LangModelSGD(Optimizer): def __init__(self, setting, verbose=True): super(LangModelSGD, self).__init__() self.iterations = K.variable(0., name="iterations") self.lr = K.variable(1.0, name="lr") self.epoch_interval = K.variable(setting.epoch_interval) self.decay = K.variable(setting.decay) self._clipnorm = setting.norm_clipping self.verbose = verbose def get_updates(self, params, constraints, loss): grads = self.get_gradients(loss, params) norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads])) grads = [clip_norm(g, self._clipnorm, norm) for g in grads] self.updates = [] self.updates.append(K.update_add(self.iterations, 1)) for p, g in zip(params, grads): self.updates.append((p, p - self.lr * g)) return self.updates def get_config(self): config = {"iterations": float(K.get_value(self.iterations)), "lr": float(K.get_value(self.lr)) } base_config = super(LangModelSGD, self).get_config() return dict(list(base_config.items()) + list(config.items())) def get_lr_scheduler(self): def scheduler(epoch): epoch_interval = K.get_value(self.epoch_interval) if epoch != 0 and (epoch + 1) % epoch_interval == 0: lr = K.get_value(self.lr) decay = K.get_value(self.decay) K.set_value(self.lr, lr * decay) if self.verbose: print(self.get_config()) return K.get_value(self.lr) return LearningRateScheduler(scheduler)
Python
0.000001
@@ -326,18 +326,48 @@ _init__( +clipnorm=setting.norm_clipping )%0A - @@ -588,55 +588,8 @@ ay)%0A - self._clipnorm = setting.norm_clipping%0A @@ -666,24 +666,24 @@ nts, loss):%0A + grad @@ -723,141 +723,8 @@ ms)%0A - norm = K.sqrt(sum(%5BK.sum(K.square(g)) for g in grads%5D))%0A grads = %5Bclip_norm(g, self._clipnorm, norm) for g in grads%5D%0A%0A
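The fix delegates norm clipping to Keras itself: Optimizer.get_gradients() applies clipnorm when it is set on the base class, so the hand-rolled global-norm computation (and its call to an apparently unimported clip_norm helper) can be deleted. An excerpt of the decoded result, with the unchanged attribute setup elided:

    def __init__(self, setting, verbose=True):
        # Keras clips gradients inside get_gradients() when clipnorm is set here
        super(LangModelSGD, self).__init__(clipnorm=setting.norm_clipping)
        # ... remaining attributes unchanged ...

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)  # already norm-clipped
        self.updates = []
        self.updates.append(K.update_add(self.iterations, 1))
        for p, g in zip(params, grads):
            self.updates.append((p, p - self.lr * g))
        return self.updates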
db3cee63baf64d00b2d2ac4fcf726f287b6d7af2
Update call to proxy fix to use new method signature
app/proxy_fix.py
app/proxy_fix.py
from werkzeug.middleware.proxy_fix import ProxyFix class CustomProxyFix(object): def __init__(self, app, forwarded_proto): self.app = ProxyFix(app) self.forwarded_proto = forwarded_proto def __call__(self, environ, start_response): environ.update({ "HTTP_X_FORWARDED_PROTO": self.forwarded_proto }) return self.app(environ, start_response) def init_app(app): app.wsgi_app = CustomProxyFix(app.wsgi_app, app.config.get('HTTP_PROTOCOL', 'http'))
Python
0
@@ -153,16 +153,68 @@ yFix(app +, x_for=1, x_proto=1, x_host=1, x_port=0, x_prefix=0 )%0A
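Werkzeug 0.15 replaced ProxyFix's old num_proxies argument with per-header trust counts, which is what the decoded patch pins down explicitly (the rest of the class is unchanged):

from werkzeug.middleware.proxy_fix import ProxyFix

class CustomProxyFix(object):
    def __init__(self, app, forwarded_proto):
        # trust exactly one hop for X-Forwarded-For/-Proto/-Host and
        # ignore X-Forwarded-Port and X-Forwarded-Prefix entirely
        self.app = ProxyFix(app, x_for=1, x_proto=1, x_host=1,
                            x_port=0, x_prefix=0)
        self.forwarded_proto = forwarded_proto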
47a11510b7a6897c59e470677ad05c9ebcaab0a7
Make window invisible in info.py
pyglet/info.py
pyglet/info.py
#!/usr/bin/env python '''Get environment information useful for debugging. ''' __docformat__ = 'restructuredtext' __version__ = '$Id: $' _first_heading = True def _heading(heading): global _first_heading if not _first_heading: print else: _first_heading = False print heading print '-' * 78 def dump_python(): '''Dump Python version and environment to stdout.''' import os import sys print 'sys.version:', sys.version print 'sys.platform:', sys.platform print 'os.getcwd():', os.getcwd() for key, value in os.environ.items(): if key.startswith('PYGLET_'): print "os.environ['%s']: %s" % (key, value) def dump_pyglet(): '''Dump pyglet version and options.''' import pyglet print 'pyglet.version:', pyglet.version print 'pyglet.__file__:', pyglet.__file__ for key, value in pyglet.options.items(): print "pyglet.options['%s'] = %r" % (key, value) def dump_window(): '''Dump display, windowm, screen and default config info.''' import pyglet.window platform = pyglet.window.get_platform() print 'platform:', repr(platform) display = platform.get_default_display() print 'display:', repr(display) screens = display.get_screens() for i, screen in enumerate(screens): print 'screens[%d]: %r' % (i, screen) window = pyglet.window.Window() for key, value in window.config.get_gl_attributes(): print "config['%s'] = %r" % (key, value) print 'context:', repr(window.context) window.close() def dump_gl(): '''Dump GL info.''' from pyglet.gl import gl_info print 'gl_info.get_version():', gl_info.get_version() print 'gl_info.get_vendor():', gl_info.get_vendor() print 'gl_info.get_renderer():', gl_info.get_renderer() print 'gl_info.get_extensions():' extensions = list(gl_info.get_extensions()) extensions.sort() for name in extensions: print ' ', name def dump_glu(): '''Dump GLU info.''' from pyglet.gl import glu_info print 'glu_info.get_version():', glu_info.get_version() print 'glu_info.get_extensions():' extensions = list(glu_info.get_extensions()) extensions.sort() for name in extensions: print ' ', name def dump_media(): '''Dump pyglet.media info.''' import pyglet.media print 'driver:', pyglet.media.driver.__name__ def dump_avbin(): '''Dump AVbin info.''' try: import pyglet.media.avbin print 'Library:', pyglet.media.avbin.av print 'AVbin version:', pyglet.media.avbin.av.avbin_get_version() print 'FFmpeg revision:', \ pyglet.media.avbin.av.avbin_get_ffmpeg_revision() except: print 'AVbin not available.' def dump_al(): '''Dump OpenAL info.''' try: from pyglet.media.drivers import openal print 'Library:', openal.al._lib print 'Version:', openal.get_version() print 'Extensions:' for extension in openal.get_extensions(): print ' ', extension except: print 'OpenAL not available.' def _try_dump(heading, func): _heading(heading) try: func() except: import traceback traceback.print_exc() def dump(): '''Dump all information to stdout.''' _try_dump('Python', dump_python) _try_dump('pyglet', dump_pyglet) _try_dump('pyglet.window', dump_window) _try_dump('pyglet.gl.gl_info', dump_gl) _try_dump('pyglet.gl.glu_info', dump_glu) _try_dump('pyglet.media', dump_media) _try_dump('pyglet.media.avbin', dump_avbin) _try_dump('pyglet.media.drivers.openal', dump_al) if __name__ == '__main__': dump()
Python
0.000041
@@ -1385,16 +1385,29 @@ .Window( +visible=False )%0A fo
fee4ec26f52c584faa0aa5e35de955972b7c56bd
return a sorted list so tests can be deterministic
lms/djangoapps/bulk_user_retirement/views.py
lms/djangoapps/bulk_user_retirement/views.py
""" An API for retiring user accounts. """ import logging from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication from django.contrib.auth import get_user_model from django.db import transaction from rest_framework import permissions, status from rest_framework.response import Response from rest_framework.views import APIView from openedx.core.djangoapps.user_api.accounts.permissions import CanRetireUser from openedx.core.djangoapps.user_api.accounts.utils import create_retirement_request_and_deactivate_account log = logging.getLogger(__name__) class BulkUsersRetirementView(APIView): """ **Use Case** Implementation for Bulk User Retirement API. Creates a retirement request for one or more users. **Example Request** POST /v1/accounts/bulk_retire_users { "usernames": "test_user1, test_user2" } **POST Parameters** A POST request can include the following parameter. * usernames: Comma separated strings of usernames that should be retired. """ authentication_classes = (JwtAuthentication, ) permission_classes = (permissions.IsAuthenticated, CanRetireUser) def post(self, request, **kwargs): # pylint: disable=unused-argument """ Initiates the bulk retirement process for the given users. """ request_usernames = request.data.get('usernames') if request_usernames: usernames_to_retire = [each_username.strip() for each_username in request_usernames.split(',')] else: usernames_to_retire = [] User = get_user_model() successful_user_retirements, failed_user_retirements = [], [] for username in usernames_to_retire: try: user_to_retire = User.objects.get(username=username) with transaction.atomic(): create_retirement_request_and_deactivate_account(user_to_retire) except User.DoesNotExist: log.exception(f'The user "{username}" does not exist.') failed_user_retirements.append(username) except Exception as exc: # pylint: disable=broad-except log.exception(f'500 error retiring account {exc}') failed_user_retirements.append(username) successful_user_retirements = list(set(usernames_to_retire).difference(failed_user_retirements)) return Response( status=status.HTTP_200_OK, data={ "successful_user_retirements": successful_user_retirements, "failed_user_retirements": failed_user_retirements } )
Python
0.99995
@@ -2376,12 +2376,14 @@ s = -list +sorted (set
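The whole fix is one call swap. Set iteration order is not stable across runs, so materializing through sorted() rather than list() keeps the response deterministic for the tests:

        # sorted() also returns a list, but in a reproducible order
        successful_user_retirements = sorted(
            set(usernames_to_retire).difference(failed_user_retirements))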
d56cfbf87c01ac496200341a723ddcee88798a01
Add setup of default translator object so doctests can run when using _. Fixes #509.
pylons/test.py
pylons/test.py
"""Test related functionality Adds a Pylons plugin to `nose <http://www.somethingaboutorange.com/mrl/projects/nose/>`_ that loads the Pylons app *before* scanning for doc tests. This can be configured in the projects :file:`setup.cfg` under a ``[nosetests]`` block: .. code-block:: ini [nosetests] with-pylons=development.ini Alternate ini files may be specified if the app should be loaded using a different configuration. """ import os import sys import nose.plugins import pkg_resources from paste.deploy import loadapp import pylons from pylons.i18n.translation import _get_translator pylonsapp = None class PylonsPlugin(nose.plugins.Plugin): """Nose plugin extension For use with nose to allow a project to be configured before nose proceeds to scan the project for doc tests and unit tests. This prevents modules from being loaded without a configured Pylons environment. """ enabled = False enableOpt = 'pylons_config' name = 'pylons' def add_options(self, parser, env=os.environ): """Add command-line options for this plugin""" env_opt = 'NOSE_WITH_%s' % self.name.upper() env_opt.replace('-', '_') parser.add_option("--with-%s" % self.name, dest=self.enableOpt, type="string", default="", help="Setup Pylons environment with the config file" " specified by ATTR [NOSE_ATTR]") def configure(self, options, conf): """Configure the plugin""" self.config_file = None self.conf = conf if hasattr(options, self.enableOpt): self.enabled = bool(getattr(options, self.enableOpt)) self.config_file = getattr(options, self.enableOpt) def begin(self): """Called before any tests are collected or run Loads the application, and in turn its configuration. """ global pylonsapp path = os.getcwd() sys.path.insert(0, path) pkg_resources.working_set.add_entry(path) self.app = pylonsapp = loadapp('config:' + self.config_file, relative_to=path)
Python
0
@@ -2172,12 +2172,184 @@ ve_to=path)%0A + %0A # For tests that utilize the i18n _ object, initialize a NullTranslator%0A pylons.translator._push_object(_get_translator(pylons.config.get('lang')))%0A
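Decoded, the plugin's begin() gains two lines right after the app is loaded, pushing a translator (a NullTranslation when no lang is configured) so module-level _ calls in doctests do not blow up:

        self.app = pylonsapp = loadapp('config:' + self.config_file,
                                       relative_to=path)

        # For tests that utilize the i18n _ object, initialize a NullTranslator
        pylons.translator._push_object(_get_translator(pylons.config.get('lang')))

_get_translator is already imported at the top of the file shown in old_contents.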
4db28d9f8ae0c3ad22121226c1ec0b59f4258759
Update pylsy.py
pylsy/pylsy.py
pylsy/pylsy.py
# -*- coding: utf-8 -*- from __future__ import print_function class PylsyTable(object): def __init__(self, attributes): self.Attributes = attributes self.Table = [] self.AttributesLength = [] self.cols_num = len(self.Attributes) self.lines_num = 0 for attribute in self.Attributes: col = dict() col[attribute] = "" self.Table.append(col) def print_divide(self): for space in self.AttributesLength: print("+ ", end='') for sign in range(space): print("- ", end='') print("+") def add_data(self, attribute, values): for col in self.Table: if attribute in col: dict_values = [str(value) for value in values] col[attribute] = dict_values def create_table(self): for col in self.Table: values = list(col.values())[0] if self.lines_num < len(values): self.lines_num = len(values) # find the length of longest word in current column key_length = len(list(col.keys())[0]) for value in values: length = len(value) if length > key_length: key_length = length self.AttributesLength.append(key_length) self.print_head() self.print_value() def print_head(self): self.print_divide() print("| ", end='') for spaces, attr in zip(self.AttributesLength, self.Attributes): space_num = spaces * 2 - 1 start = (space_num - len(attr)) // 2 for space in range(start): print(" ", end='') print(attr + ' ', end='') end = space_num - start - len(attr) for space in range(end): print(" ", end='') print("| ", end='') print("") self.print_divide() def print_value(self): for line in range(self.lines_num): for col, length in zip(self.Table, self.AttributesLength): print("| ", end='') value_length = length * 2 - 1 value = list(col.values())[0] if len(value) != 0: start = (value_length - len(value[line])) // 2 for space in range(start): print(" ", end='') print(value[line] + ' ', end='') end = value_length - start - len(value[line]) for space in range(end): print(" ", end='') else: start = 0 end = value_length - start + 1 for space in range(end): print(" ", end='') print("|") self.print_divide()
Python
0
@@ -85,25 +85,27 @@ ct):%0A -%0A + %0A
ad78e28d4537054a0d19643bb7efb1572dd4702c
Encode topic heading as UTF8
app/utils/pdf.py
app/utils/pdf.py
import pdftotext from PIL import Image from wand.image import Image import os import io TOPICS = [ 'Philosophy', 'Society', 'Esoterica', 'Art', 'Culture', 'Science & Nature', 'Gods & Heroes', 'Myths Of The World' ] def extract_first_page(blob): pdf = Image(blob=blob, resolution=200) image = Image( width=pdf.width, height=pdf.height ) image.composite( pdf.sequence[0], top=0, left=0 ) return image.make_blob('png') def extract_topics(pdf_binary): pdf = pdftotext.PDF(io.BytesIO(pdf_binary)) topic_headings = '' for n in range(4, len(pdf)): topic = '' topic_heading = '' for line_no, l in enumerate(pdf[n].split('\n')): words = [w.capitalize() for w in l.strip().split(' ') if w.strip()] if not words: continue if not topic and len(words) < 5: heading = ' '.join(words) if heading in TOPICS: topic = heading continue if topic: line = ' '.join(words) if len(line) < 30 and u'\u201c' not in line: topic_heading += line + ' ' if line_no > 2: break if topic_heading: topic_headings += '{}: {}\n'.format(topic, topic_heading[:-1]) return topic_headings[:-1] if topic_headings else ''
Python
0.999996
@@ -1395,16 +1395,31 @@ _heading +.encode(%22utf8%22) %5B:-1%5D)%0A%0A
747d2563fd566a70420a04d3db209fffc813f147
fix docs/hash-tree.py for python 3
docs/hash-tree.py
docs/hash-tree.py
#!/usr/bin/env python # Write a directory to the Git index. # Prints the directory's SHA-1 to stdout. # # Copyright 2013 Lars Buitinck / University of Amsterdam. # License: MIT (http://opensource.org/licenses/MIT) # https://github.com/larsmans/seqlearn/blob/d7a3d82c/doc/hash-tree.py import os from os.path import split from posixpath import join from subprocess import check_output, Popen, PIPE import sys def hash_file(path): """Write file at path to Git index, return its SHA1 as a string.""" return check_output(["git", "hash-object", "-w", "--", path]).strip() def _lstree(files, dirs): """Make git ls-tree like output.""" for f, sha1 in files: yield "100644 blob {}\t{}\0".format(sha1, f) for d, sha1 in dirs: yield "040000 tree {}\t{}\0".format(sha1, d) def _mktree(files, dirs): mkt = Popen(["git", "mktree", "-z"], stdin=PIPE, stdout=PIPE) return mkt.communicate("".join(_lstree(files, dirs)))[0].strip() def hash_dir(path): """Write directory at path to Git index, return its SHA1 as a string.""" dir_hash = {} for root, dirs, files in os.walk(path, topdown=False): f_hash = ((f, hash_file(join(root, f))) for f in files) d_hash = ((d, dir_hash[join(root, d)]) for d in dirs) # split+join normalizes paths on Windows (note the imports) dir_hash[join(*split(root))] = _mktree(f_hash, d_hash) return dir_hash[path] if __name__ == "__main__": print(hash_dir(sys.argv[1]))
Python
0.00011
@@ -209,16 +209,28 @@ s/MIT)%0A%0A +# Based on:%0A # https: @@ -291,16 +291,16 @@ tree.py%0A - %0Aimport @@ -576,16 +576,25 @@ path%5D). +decode(). strip()%0A @@ -921,31 +921,14 @@ -return mkt.communicate( +inp = %22%22.j @@ -952,16 +952,63 @@ , dirs)) +.encode('ascii')%0A return mkt.communicate(inp )%5B0%5D.str @@ -1007,24 +1007,33 @@ )%5B0%5D.strip() +.decode() %0A%0A%0Adef hash_
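All three substantive hunks are bytes/str boundary fixes for Python 3: subprocess output is bytes, so it is decoded on the way out, and the mktree input is encoded on the way in (the remaining hunk just adds a "# Based on:" comment before the URL). Decoded result:

def hash_file(path):
    """Write file at path to Git index, return its SHA1 as a string."""
    # check_output() returns bytes under Python 3; decode before stripping
    return check_output(["git", "hash-object", "-w", "--", path]).decode().strip()


def _mktree(files, dirs):
    mkt = Popen(["git", "mktree", "-z"], stdin=PIPE, stdout=PIPE)
    # communicate() wants bytes for stdin, and returns bytes for stdout
    inp = "".join(_lstree(files, dirs)).encode('ascii')
    return mkt.communicate(inp)[0].strip().decode()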
34c0c6c73a65da3120aa52600254afc909e9a3bc
Remove unused main and unused imports
pytach/wsgi.py
pytach/wsgi.py
import bottle from bottle import route, run from web import web import config app = application = bottle.Bottle() app.merge(web.app) config.arguments['--verbose'] = True if __name__ == '__main__': app.run(host='0.0.0.0', port=8082, debug=True)
Python
0
@@ -11,37 +11,22 @@ tle%0A -from bottle import route, run +%0Aimport config %0Afro @@ -41,30 +41,16 @@ port web -%0Aimport config %0A%0Aapp = @@ -140,80 +140,4 @@ rue%0A -%0Aif __name__ == '__main__':%0A%09app.run(host='0.0.0.0', port=8082, debug=True)%0A
bea0ead3dfcc055d219966c64437652c0eb2cf84
Update demo.py
python/demo.py
python/demo.py
#! /usr/bin/env python import serial import time import sys # Serial port N = "/dev/ttyUSB0" def ints2str(lst): ''' Taking a list of notes/lengths, convert it to a string ''' s = "" for i in lst: if i < 0 or i > 255: raise Exception s = s + str(chr(i)) return s # do some initialization magic s = serial.Serial(N, 57600, timeout=4) # start code s.write(ints2str([128])) # Full mode s.write(ints2str([132])) # Drive s.write(ints2str([137, 1, 44, 128, 0])) # wait s.write(ints2str([156, 1, 144])) # Turn s.write(ints2str([137, 1, 44, 0, 1])) #wait s.write(ints2str([157, 0, 90])) sys.exit()
Python
0.000001
@@ -46,18 +46,8 @@ ime%0A -import sys %0A%0A# @@ -622,14 +622,10 @@ ))%0A%0A -sys.ex +qu it()
1ca6ccb50992836720e86a7c3c766a5497cf7588
Remove unused import
mint/django_rest/rbuilder/querysets/views.py
mint/django_rest/rbuilder/querysets/views.py
#!/usr/bin/python # # Copyright (c) 2011 rPath, Inc. # # All rights reserved. # from mint.django_rest.deco import return_xml, requires from mint.django_rest.rbuilder import service from mint.django_rest.rbuilder.querysets import manager class BaseQuerySetService(service.BaseService): pass class QuerySetService(BaseQuerySetService): @return_xml def rest_GET(self, request, querySetId=None): return self.get(querySetId) def get(self, querySetId): if querySetId: return self.mgr.getQuerySet(querySetId) else: return self.mgr.getQuerySets() @requires('query_set') @return_xml def rest_POST(self, request, query_set): return self.mgr.addQuerySet(query_set)
Python
0.000001
@@ -178,64 +178,8 @@ vice -%0Afrom mint.django_rest.rbuilder.querysets import manager %0A%0Acl
96874725b98ad0f2944cbb81f154e4f46819bd61
fix non-integer preset id
qmsk/e2/web.py
qmsk/e2/web.py
import asyncio import aiohttp.wsgi import logging; log = logging.getLogger('qmsk.e2.web') import qmsk.e2.client import qmsk.e2.server import qmsk.web.async import qmsk.web.html import qmsk.web.json import qmsk.web.rewrite import qmsk.web.urls import werkzeug import werkzeug.exceptions html = qmsk.web.html.html5 WEB_PORT = 8081 STATIC = './static' class APIBase (qmsk.web.json.JSONMixin, qmsk.web.async.Handler): CORS_ORIGIN = '*' CORS_METHODS = ('GET', 'POST') CORS_HEADERS = ('Content-Type', 'Authorization') CORS_CREDENTIALS = True def render_preset(self, preset): destinations = dict() out = { 'preset': preset.index, 'destinations': destinations, 'title': preset.title, 'group': preset.group.title if preset.group else None, } for destination in preset.destinations: if preset == destination.program: status = 'program' elif preset == destination.preview: status = 'preview' else: status = None destinations[destination.title] = status if status: out[status] = True if preset == self.app.presets.active: out['active'] = True return out class APIIndex(APIBase): def init(self): self.presets = self.app.presets self.seq = self.app.server.seq self.safe = self.app.server.client.safe def render_group (self, group): return { 'title': group.title, 'presets': [preset.index for preset in group.presets], } def render_destination (self, destination): return { 'outputs': destination.index, 'title': destination.title, 'preview': destination.preview.index if destination.preview else None, 'program': destination.program.index if destination.program else None, } def render_json(self): return { 'safe': self.safe, 'seq': self.seq, 'presets': {preset.index: self.render_preset(preset) for preset in self.presets}, 'groups': [self.render_group(group) for group in self.presets.groups], 'destinations': [self.render_destination(destination) for destination in self.presets.destinations], } class APIPreset(APIBase): """ preset: Preset activated preset, or requested preset, or active preset transition: True or int activated transition seq: float current sequence number """ def init(self): self.preset = None self.transition = self.error = None self.seq = self.app.server.seq @asyncio.coroutine def process_async(self, preset=None): """ Raises werkzeug.exceptions.HTTPException. 
preset: int - preset from URL """ if preset: try: preset = self.app.presets[preset] except KeyError as error: raise werkzeug.exceptions.BadRequest("Invalid preset={preset}".format(preset=preset)) else: preset = None post = self.request_post() if post is not None: try: self.preset, self.transition, self.seq = yield from self.app.process(preset, post) except qmsk.e2.server.SequenceError as error: raise werkzeug.exceptions.BadRequest(error) except qmsk.e2.client.Error as error: raise werkzeug.exceptions.InternalServerError(error) except qmsk.e2.server.Error as error: raise werkzeug.exceptions.InternalServerError(error) elif preset: self.preset = preset else: self.preset = self.app.presets.active def render_json(self): out = { 'seq': self.seq, } if self.preset: out['preset'] = self.render_preset(self.preset) if self.transition is not None: out['transition'] = self.transition return out class API(qmsk.web.async.Application): URLS = qmsk.web.urls.rules({ '/v1/': APIIndex, '/v1/preset/': APIPreset, '/v1/preset/<int:preset>': APIPreset, }) def __init__ (self, server): """ server: qmsk.e2.server.Server """ super().__init__() self.server = server self.presets = server.presets @asyncio.coroutine def process(self, preset, params): """ Process an action request preset: Preset params: { cut: * autotrans: * transition: int seq: float or None } Raises qmsk.e2.client.Error, qmsk.e2.server.Error """ if 'seq' in params: seq = float(params['seq']) else: seq = None if 'cut' in params: transition = 0 elif 'autotrans' in params: transition = True elif 'transition' in params: transition = int(params['transition']) else: transition = None active, seq = yield from self.server.activate(preset, transition, seq) return active, transition, seq import argparse import os.path def parser (parser): group = parser.add_argument_group("qmsk.e2.web Options") group.add_argument('--e2-web-listen', metavar='ADDR', help="Web server listen address") group.add_argument('--e2-web-port', metavar='PORT', type=int, default=WEB_PORT, help="Web server port") group.add_argument('--e2-web-static', metavar='PATH', default=STATIC, help="Web server /static path") @asyncio.coroutine def apply (args, server, loop): """ server: qmsk.e2.server.Server """ # API api = API(server) # WSGI stack fallback = werkzeug.exceptions.NotFound() static = werkzeug.wsgi.SharedDataMiddleware(fallback, { '/': args.e2_web_static, }) dispatcher = werkzeug.wsgi.DispatcherMiddleware(static, { '/api': api, }) application = qmsk.web.rewrite.RewriteMiddleware(dispatcher, { '/': '/index.html', }) # aiohttp Server def server_factory(): return aiohttp.wsgi.WSGIServerHttpProtocol(application, readpayload = True, debug = True, ) server = yield from loop.create_server(server_factory, host = args.e2_web_listen, port = args.e2_web_port, ) return application
Python
0.00082
@@ -2983,23 +2983,13 @@ eset -: int - + pre @@ -4420,12 +4420,8 @@ et/%3C -int: pres @@ -4425,16 +4425,20 @@ reset%3E': + APIPre
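The route previously used the <int:preset> converter, which (assuming qmsk.web.urls wraps a Werkzeug URL map, as the imports suggest) 404s on any non-numeric id before the handler ever runs; dropping the converter lets process_async() do the lookup and return a proper BadRequest. The docstring loses its ": int" annotation to match. Decoded:

    URLS = qmsk.web.urls.rules({
        '/v1/':                 APIIndex,
        '/v1/preset/':          APIPreset,
        '/v1/preset/<preset>':  APIPreset,   # was <int:preset>
    })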
aa6a72c419846bc9d1ae5d8f114d214cbc2be60c
Fix randomize without cache
fake_useragent/utils.py
fake_useragent/utils.py
import re import os try: from urllib import urlopen, quote_plus except ImportError: # Python 3 from urllib.request import urlopen from urllib.parse import quote_plus try: import json except ImportError: import simplejson as json from fake_useragent import settings def get(url, annex=None): if not annex is None: url = url % (quote_plus(annex), ) return urlopen(url).read() def get_browsers(): ''' very very hardcoded/dirty re/split stuff, but no dependencies ''' html = get(settings.BROWSERS_STATS_PAGE) html = html.decode('windows-1252') html = html.split('<table class="reference">')[1] html = html.split('<td>&nbsp;</td>')[0] browsers = re.findall(r'\.asp">(.+?)<', html, re.UNICODE) browsers_statistics = re.findall(r'"right">(.+?)\s', html, re.UNICODE) # TODO: unsure encoding # browsers = list(map( # lambda stat: stat.encode('utf-8', 'ignore'), browsers) # ) # browsers_statistics = list( # map( # lambda stat: stat.encode('utf-8', 'ignore'), # browsers_statistics # ) # ) return list(zip(browsers, browsers_statistics)) def get_browser_versions(browser): ''' very very hardcoded/dirty re/split stuff, but no dependencies ''' html = get(settings.BROWSER_BASE_PAGE, browser) html = html.decode('iso-8859-1') html = html.split('<div id=\'liste\'>')[1] html = html.split('</div>')[0] browsers_iter = re.finditer(r'\.php\'>(.+?)</a', html, re.UNICODE) count = 0 browsers = [] for browser in browsers_iter: if 'more' in browser.group(1).lower(): continue # TODO: ensure encoding # browser.group(1).encode('utf-8', 'ignore') browsers.append(browser.group(1)) count += 1 if count == settings.BROWSERS_COUNT_LIMIT: break return browsers def load(): browsers_dict = {} randomize_dict = {} for item in get_browsers(): browser, percent = item clear_browser = browser.replace(' ', '').lower() browsers_dict[clear_browser] = get_browser_versions(browser) for counter in range(int(float(percent))): randomize_dict[len(randomize_dict)] = clear_browser db = {} db['browsers'] = browsers_dict db['randomize'] = randomize_dict return db def write(data): data = json.dumps(data) # no codecs\with for python 2.5 f = open(settings.DB, 'w+') f.write(data) f.close() def read(): # no codecs\with for python 2.5 f = open(settings.DB, 'r') data = f.read() f.close() return json.loads(data, 'utf-8') def exist(): return os.path.isfile(settings.DB) def rm(): if exist(): os.remove(settings.DB) def refresh(): if exist(): rm() write(load()) def load_cached(): if not exist(): refresh() return read()
Python
0.000005
@@ -2243,16 +2243,20 @@ ze_dict%5B +str( len(rand @@ -2266,16 +2266,17 @@ ze_dict) +) %5D = clea
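The randomize dict passes through json.dumps/json.loads when cached, which coerces its keys to strings, while the freshly built dict kept integer keys, so lookups diverged depending on whether the cache existed. The decoded one-line fix makes the keys strings from the start:

        for counter in range(int(float(percent))):
            # json round-trips dict keys as strings; match that even when
            # the data never touches the on-disk cache
            randomize_dict[str(len(randomize_dict))] = clear_browser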
aa8e51fc8ad969cd04098a5714ff78092b35f58f
Remove unused import
polyaxon/libs/http.py
polyaxon/libs/http.py
import os import requests import shutil import tarfile from urllib.parse import parse_qs, urlencode, urljoin, urlparse, urlunparse from hestia.auth import AuthenticationTypes from hestia.fs import move_recursively from django.conf import settings from libs.api import get_http_api_url def absolute_uri(url): if not url: return None if not settings.API_HOST: return url url = urljoin(settings.API_HOST.rstrip('/') + '/', url.lstrip('/')) return '{}://{}'.format(settings.PROTOCOL, url) def add_notification_referrer_param(url, provider, is_absolute=True): if not is_absolute: url = absolute_uri(url) if not url: return None parsed_url = urlparse(url) query = parse_qs(parsed_url.query) query['referrer'] = provider url_list = list(parsed_url) url_list[4] = urlencode(query, doseq=True) return urlunparse(url_list) def download(url, filename, logger, authentication_type=None, access_token=None, headers=None, internal=True, timeout=60): """Get download url from the internal api.""" if internal: authentication_type = authentication_type or AuthenticationTypes.INTERNAL_TOKEN else: authentication_type = AuthenticationTypes.TOKEN if authentication_type == AuthenticationTypes.INTERNAL_TOKEN and not access_token: access_token = settings.SECRET_INTERNAL_TOKEN # Auth headers if access_token is present request_headers = {} if access_token: request_headers["Authorization"] = "{} {}".format(authentication_type, access_token) # Add any additional headers if headers: request_headers.update(headers) try: if internal: api_url = get_http_api_url() url = '{}/{}'.format(api_url, url) logger.info("Downloading file from %s using %s" % (url, authentication_type)) response = requests.get(url, headers=request_headers, timeout=timeout, stream=True) if response.status_code != 200: logger.error("Failed to download file from %s: %s" % (url, response.status_code), extra={'stack': True}) return None with open(filename, 'wb') as f: logger.info("Processing file %s" % filename) for chunk in response.iter_content(chunk_size=1024): if chunk: f.write(chunk) return filename except requests.exceptions.RequestException: logger.error("Download exception", exc_info=True) return None def untar_file(build_path, filename, logger, delete_tar=False, internal=False, tar_suffix=None): extract_path = build_path if internal else '/tmp' if filename and os.path.exists(filename): logger.info("Untarring the contents of the file ...") tar = tarfile.open(filename) tar.extractall(extract_path) tar.close() if delete_tar: logger.info("Cleaning up the tar file ...") os.remove(filename) if not internal: tarf = [f for f in os.listdir(extract_path) if tar_suffix in f] if tarf: src = os.path.join(extract_path, tarf[0]) move_recursively(src, build_path) return filename else: logger.info("File was not found, build_path: %s" % os.listdir(build_path)) return None
Python
0.000001
@@ -23,22 +23,8 @@ sts%0A -import shutil%0A impo
1b4776ddb6ca0f30e4b61393ac37a8f44cfb2af4
fix auto-discovering db config
feedservice/settings.py
feedservice/settings.py
# -*- coding: utf-8 -*- import os, os.path def bool_env(val, default): """Replaces string based environment values with Python booleans""" if not val in os.environ: return default return True if os.environ.get(val) == 'True' else False DEBUG = bool_env('MYGPOFS_DEBUG', True) TEMPLATE_DEBUG = DEBUG ADMINS = ( ('Stefan Kögl', '[email protected]'), ) MANAGERS = ADMINS # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'UTC' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True # Static asset configuration BASE_DIR = os.path.dirname(os.path.abspath(__file__)) BASE_DIR = os.path.join(BASE_DIR, '../htdocs') STATIC_ROOT = 'static' STATIC_URL = '/media/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'media'), ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'm6jkg5lzard@k^p(wui4gtx_zu4s=26c+c0bk+k1xsik6+derf' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ) ROOT_URLCONF = 'feedservice.urls' TEMPLATE_DIRS = ( ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.staticfiles', 'feedservice.parse', 'feedservice.urlstore', 'feedservice.webservice', ) SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') BASE_URL='http://localhost:8080/' import dj_database_url DATABASES = dj_database_url.config() SOUNDCLOUD_CONSUMER_KEY = os.getenv('MYGPOFS_SOUNDCLOUD_CONSUMER_KEY', '') FLATTR_THING = '' ALLOWED_HOSTS = filter(None, os.getenv('MYGPOFS_ALLOWED_HOSTS', '').split(';')) try: from settings_prod import * except ImportError, e: import sys print >> sys.stderr, 'create settings_prod.py with your customized settings'
Python
0.000002
@@ -18,16 +18,17 @@ -8 -*-%0A%0A +%0A import o @@ -2375,16 +2375,28 @@ ABASES = + %7B'default': dj_data @@ -2412,16 +2412,17 @@ config() +%7D %0A%0ASOUNDC
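dj_database_url.config() returns a single connection-settings dict, not a full DATABASES mapping, so assigning it directly leaves Django without a 'default' alias. Decoded fix (the first hunk is only a blank-line tweak near the coding cookie):

import dj_database_url

# wrap the parsed DATABASE_URL settings under the required 'default' alias
DATABASES = {'default': dj_database_url.config()}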
d86bdec5d7d57fe74cb463e391798bd1e5be87ff
Update Ghana code to match current Pombola
pombola/ghana/urls.py
pombola/ghana/urls.py
from django.conf.urls import patterns, include, url, handler404 from django.views.generic import TemplateView import django.contrib.auth.views from .views import data_upload, info_page_upload urlpatterns = patterns('', url(r'^intro$', TemplateView.as_view(template_name='intro.html')), url(r'^data/upload/mps/$', data_upload, name='data_upload'), url(r'^data/upload/info-page/$', info_page_upload, name='info_page_upload'), #auth views url(r'^accounts/login$', django.contrib.auth.views.login, name='login'), url(r'^accounts/logut$', django.contrib.auth.views.logout, name='logout'), #url(r'^accounts/register$', registration.backends.simple.urls, name='register'), )
Python
0
@@ -36,32 +36,20 @@ ns, +url, include -, url, handler404 %0Afro @@ -95,42 +95,8 @@ iew%0A -import django.contrib.auth.views%0A%0A %0Afro @@ -394,267 +394,52 @@ -%0A #auth views%0A url(r'%5Eaccounts/login$', django.contrib.auth.views.login, name='login'),%0A url(r'%5Eaccounts/logut$', django.contrib.auth.views.logout, name='logout'),%0A #url(r'%5Eaccounts/register$', registration.backends.simple.urls, name='register'),%0A%0A +url('', include('django.contrib.auth.urls')), %0A)%0A
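The hand-rolled login/logout routes (and the commented-out register stub) are replaced by Django's bundled auth URLconf; the unused handler404 import and the module-level django.contrib.auth.views import go with them. Decoded, the whole file after the patch reads roughly:

from django.conf.urls import patterns, url, include
from django.views.generic import TemplateView

from .views import data_upload, info_page_upload

urlpatterns = patterns('',
    url(r'^intro$', TemplateView.as_view(template_name='intro.html')),
    url(r'^data/upload/mps/$', data_upload, name='data_upload'),
    url(r'^data/upload/info-page/$', info_page_upload, name='info_page_upload'),
    # one include supplies login, logout, password reset, etc.
    url('', include('django.contrib.auth.urls')),
)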
9b75fd09220e61fd511c99e63f8d2b30e6a0f868
stop using deprecated assertEquals()
test_csv2es.py
test_csv2es.py
## Copyright 2015 Ray Holder ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. import csv2es import unittest class TestDelimiter(unittest.TestCase): def test_sanitize(self): self.assertEquals(None, csv2es.sanitize_delimiter(None, False)) self.assertEquals(str('\t'), csv2es.sanitize_delimiter(None, True)) self.assertEquals(str('|'), csv2es.sanitize_delimiter('|', False)) self.assertEquals(str('|'), csv2es.sanitize_delimiter(u'|', False)) self.assertEquals(str('\t'), csv2es.sanitize_delimiter('|', True)) self.assertEquals(str('\t'), csv2es.sanitize_delimiter('||', True)) self.assertRaises(Exception, csv2es.sanitize_delimiter, '||', False) class TestLoading(unittest.TestCase): def test_csv(self): # TODO fill this in self.assertTrue(True) def test_tsv(self): # TODO fill this in self.assertTrue(True) if __name__ == '__main__': unittest.main()
Python
0
@@ -702,17 +702,16 @@ ertEqual -s (None, c @@ -765,33 +765,32 @@ self.assertEqual -s (str('%5Ct'), csv2 @@ -840,33 +840,32 @@ self.assertEqual -s (str('%7C'), csv2e @@ -918,25 +918,24 @@ .assertEqual -s (str('%7C'), c @@ -989,33 +989,32 @@ self.assertEqual -s (str('%5Ct'), csv2 @@ -1071,17 +1071,16 @@ ertEqual -s (str('%5Ct
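assertEquals is a deprecated alias for assertEqual (it raises a DeprecationWarning and was later removed from unittest), so the patch renames every call site, for example:

        # assertEqual, not the deprecated assertEquals alias
        self.assertEqual(None, csv2es.sanitize_delimiter(None, False))
        self.assertEqual(str('\t'), csv2es.sanitize_delimiter(None, True))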
a309a935b41ed249985882f64f9bc277b8576bd9
Update analyze_nir_intensity.py
plantcv/plantcv/analyze_nir_intensity.py
plantcv/plantcv/analyze_nir_intensity.py
# Analyze signal data in NIR image import os import cv2 import numpy as np import pandas as pd from plotnine import ggplot, aes, geom_line, scale_x_continuous from plantcv.plantcv import print_image from plantcv.plantcv import plot_image from plantcv.plantcv.threshold import binary as binary_threshold from plantcv.plantcv import params from plantcv.plantcv import outputs def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False): """This function calculates the intensity of each pixel associated with the plant and writes the values out to a file. It can also print out a histogram plot of pixel intensity and a pseudocolor image of the plant. Inputs: gray_img = 8- or 16-bit grayscale image data mask = Binary mask made from selected contours bins = number of classes to divide spectrum into histplot = if True plots histogram of intensity values Returns: analysis_images = NIR histogram image :param gray_img: numpy array :param mask: numpy array :param bins: int :param histplot: bool :return analysis_images: plotnine ggplot """ # apply plant shaped mask to image mask1 = binary_threshold(mask, 0, 255, 'light') mask1 = (mask1 / 255) # masked = np.multiply(gray_img, mask1) # calculate histogram if gray_img.dtype == 'uint16': maxval = 65536 else: maxval = 256 masked_array = gray_img[np.where(mask > 0)] masked_nir_mean = np.average(masked_array) masked_nir_median = np.median(masked_array) masked_nir_std = np.std(masked_array) # Make a pseudo-RGB image rgbimg = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR) # Calculate histogram hist_nir = [float(i[0]) for i in cv2.calcHist([gray_img], [0], mask, [bins], [0, maxval])] # Create list of bin labels bin_width = maxval / float(bins) b = 0 bin_labels = [float(b)] for i in range(bins - 1): b += bin_width bin_labels.append(b) # make hist percentage for plotting pixels = cv2.countNonZero(mask1) hist_percent = [(p / float(pixels)) * 100 for p in hist_nir] masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask) if params.debug is not None: params.device += 1 if params.debug == "print": print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + "_masked_nir_plant.png")) if params.debug == "plot": plot_image(masked1) analysis_image = None if histplot is True: hist_x = hist_percent # bin_labels = np.arange(0, bins) dataset = pd.DataFrame({'Grayscale pixel intensity': bin_labels, 'Proportion of pixels (%)': hist_x}) fig_hist = (ggplot(data=dataset, mapping=aes(x='Grayscale pixel intensity', y='Proportion of pixels (%)')) + geom_line(color='red') + scale_x_continuous(breaks=list(range(0, maxval, 25)))) analysis_image = fig_hist if params.debug == "print": fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_nir_hist.png'), verbose=False) elif params.debug == "plot": print(fig_hist) outputs.add_observation(variable='nir_frequencies', trait='near-infrared frequencies', method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=list, value=hist_nir, label=bin_labels) outputs.add_observation(variable='nir_mean', trait='near-infrared mean', method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float, value=masked_nir_mean, label='none') outputs.add_observation(variable='nir_median', trait='near-infrared median', method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float, value=masked_nir_median, label='none') outputs.add_observation(variable='nir_stdev', trait='near-infrared standard deviation', method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float, 
value=masked_nir_std, label='none') # Store images outputs.images.append(analysis_image) return analysis_image
Python
0.000002
@@ -436,16 +436,28 @@ ot=False +, label=None ):%0A %22 @@ -925,16 +925,113 @@ y values +%0A label = optional label parameter, modifies the variable name of observations recorded %0A%0A Re @@ -1189,16 +1189,38 @@ t: bool%0A + :param label: str%0A :ret @@ -3424,16 +3424,98 @@ _hist)%0A%0A + if label == None:%0A prefix = %22%22%0A else:%0A prefix = label + %22_%22%0A%0A outp @@ -3539,24 +3539,33 @@ on(variable= +prefix + 'nir_frequen @@ -3811,24 +3811,33 @@ on(variable= +prefix + 'nir_mean', @@ -4068,24 +4068,33 @@ on(variable= +prefix + 'nir_median' @@ -4290,32 +4290,32 @@ , label='none')%0A - outputs.add_ @@ -4335,16 +4335,25 @@ ariable= +prefix + 'nir_std
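The new keyword threads an optional label through to the recorded observations so repeated calls in one workflow do not overwrite each other. Decoded excerpt (written here with "is None"; the hunk itself spells it "label == None", and only one of the prefixed add_observation calls is shown):

def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False, label=None):
    # ... body as before, then derive a prefix for every variable name:
    if label is None:
        prefix = ""
    else:
        prefix = label + "_"

    outputs.add_observation(variable=prefix + 'nir_frequencies',
                            trait='near-infrared frequencies',
                            method='plantcv.plantcv.analyze_nir_intensity',
                            scale='frequency', datatype=list,
                            value=hist_nir, label=bin_labels)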
6edbaa5fb7648aea2de4517c660754c78e7a2baf
Make inference parallelism a parameter.
models/run_inference.py
models/run_inference.py
import argparse import datetime import layers import logging import tensorflow.contrib.slim as slim import tensorflow as tf import utility class ModelInference(utility.ModelConfiguration): def __init__(self, model_checkpoint_path, unclassified_feature_path): utility.ModelConfiguration.__init__(self) self.model_checkpoint_path = model_checkpoint_path self.unclassified_feature_path = unclassified_feature_path self.num_parallel_readers = 16 self.batch_size = 64 self.min_points_for_classification = 250 def run_inference(self, inference_results_path): matching_files_i = tf.matching_files(self.unclassified_feature_path) matching_files = tf.Print(matching_files_i, [matching_files_i], "Files: ") filename_queue = tf.train.input_producer(matching_files, shuffle=False, num_epochs = 1) readers = [] for _ in range(self.num_parallel_readers): reader = utility.cropping_all_slice_feature_file_reader(filename_queue, self.num_feature_dimensions+1, self.max_window_duration_seconds, self.window_max_points, self.min_points_for_classification) readers.append(reader) features, time_ranges, mmsis = tf.train.batch_join(readers, self.batch_size, enqueue_many=True, capacity=1000, shapes=[[1, self.window_max_points, self.num_feature_dimensions], [2], []]) features = self.zero_pad_features(features) logits = layers.misconception_model(features, self.window_size, self.stride, self.feature_depth, self.levels, self.num_classes, False) softmax = slim.softmax(logits) predictions = tf.cast(tf.argmax(softmax, 1), tf.int32) max_probabilities = tf.reduce_max(softmax, [1]) # Open output file, on cloud storage - so what file api? parallelism = 16 config=tf.ConfigProto( inter_op_parallelism_threads=parallelism, intra_op_parallelism_threads=parallelism) with tf.Session(config=config) as sess: init_op = tf.group( tf.initialize_local_variables(), tf.initialize_all_variables()) sess.run(init_op) logging.info("Restoring model: %s", self.model_checkpoint_path) saver = tf.train.Saver() saver.restore(sess, self.model_checkpoint_path) logging.info("Starting queue runners.") tf.train.start_queue_runners() # In a loop, calculate logits and predictions and write out. Will # be terminated when an EOF exception is thrown. logging.info("Running predictions.") i = 0 with open(inference_results_path, 'w') as output_file: while True: logging.info("Inference step: %d", i) i += 1 result = sess.run([mmsis, time_ranges, predictions, max_probabilities]) for mmsi, (start_time_seconds, end_time_seconds), label, max_probability in zip(*result): start_time = datetime.datetime.utcfromtimestamp(start_time_seconds) end_time = datetime.datetime.utcfromtimestamp(end_time_seconds) output_file.write('%d, %s, %s, %s, %.3f\n' % (mmsi, start_time.isoformat(), end_time.isoformat(), utility.VESSEL_CLASS_NAMES[label], max_probability)) # Write predictions to file: mmsi, max_feature, logits. 
def main(args): logging.getLogger().setLevel(logging.DEBUG) tf.logging.set_verbosity(tf.logging.DEBUG) model_checkpoint_path = args.model_checkpoint_path unclassified_feature_path = args.unclassified_feature_path inference_results_path = args.inference_results_path inference = ModelInference(model_checkpoint_path, unclassified_feature_path) inference.run_inference(inference_results_path) def parse_args(): """ Parses command-line arguments for training.""" argparser = argparse.ArgumentParser('Infer behavioural labels for a set of vessels.') argparser.add_argument('--unclassified_feature_path', required=True, help='The path to the unclassified vessel movement feature directories.') argparser.add_argument('--model_checkpoint_path', required=True, help='Path to the checkpointed model to use for inference.') argparser.add_argument('--inference_results_path', required=True, help='Path to the csv file to dump all inference results.') return argparser.parse_args() if __name__ == '__main__': args = parse_args() main(args)
Python
0.000004
@@ -424,43 +424,8 @@ ath%0A - self.num_parallel_readers = 16%0A @@ -517,16 +517,39 @@ ce(self, + inference_parallelism, inferen @@ -855,24 +855,25 @@ n range( -self.num +inference _paralle @@ -877,16 +877,11 @@ llel -_readers +ism ):%0A @@ -1750,30 +1750,8 @@ pi?%0A - parallelism = 16 %0A @@ -1814,32 +1814,42 @@ llelism_threads= +inference_ parallelism,%0A @@ -1894,16 +1894,26 @@ threads= +inference_ parallel @@ -3534,16 +3534,69 @@ lts_path +%0A inference_parallelism = args.inference_parallelism %0A%0A infe @@ -3694,16 +3694,39 @@ ference( +inference_parallelism, inferenc @@ -4324,16 +4324,156 @@ lts.')%0A%0A + argparser.add_argument('--inference_parallelism', type=int, default=4,%0A help='Path to the csv file to dump all inference results.')%0A%0A return
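Decoded, the hardcoded self.num_parallel_readers = 16 and the local parallelism = 16 both give way to a single inference_parallelism value plumbed in from a new CLI flag (whose help string, as added, is copy-pasted from the results-path option). Excerpt of the result:

    def run_inference(self, inference_parallelism, inference_results_path):
        # ... file-matching setup as before ...
        for _ in range(inference_parallelism):
            # one cropping/slicing feature-file reader per parallel slot
            readers.append(reader)

        config = tf.ConfigProto(
            inter_op_parallelism_threads=inference_parallelism,
            intra_op_parallelism_threads=inference_parallelism)

and in parse_args():

    argparser.add_argument('--inference_parallelism', type=int, default=4,
        help='Path to the csv file to dump all inference results.')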
7a7e66a5be144854099d5ff463fb13944110e752
Revert "configure Flask-Login to remember logged in users across subdomains"
portality/settings.py
portality/settings.py
# ========================
# MAIN SETTINGS

# make this something secret in your overriding app.cfg
SECRET_KEY = "default-key"

# contact info
ADMIN_NAME = "DOAJ"
ADMIN_EMAIL = "[email protected]"
ADMINS = ["[email protected]", "[email protected]"]
SUPPRESS_ERROR_EMAILS = False  # should be set to False in production and True in staging

# service info
SERVICE_NAME = "Directory of Open Access Journals"
SERVICE_TAGLINE = ""
HOST = "0.0.0.0"
DOMAIN = "doaj.cottagelabs.com"  # facetview needs to access it like a user would, because well, it does run client-side
DEBUG = True
PORT = 5004

# elasticsearch settings
ELASTIC_SEARCH_HOST = "http://localhost:9200"  # remember the http:// or https://
#ELASTIC_SEARCH_HOST = "http://doaj.cottagelabs.com:9200"
ELASTIC_SEARCH_DB = "doaj"
INITIALISE_INDEX = True  # whether or not to try creating the index and required index types on startup

# can anonymous users get raw JSON records via the query endpoint?
PUBLIC_ACCESSIBLE_JSON = True

# ========================
# authorisation settings

# Can people register publicly? If false, only the superuser can create new accounts
# PUBLIC_REGISTER = False

SUPER_USER_ROLE = "admin"

# remember people are logged in across subdomains
# without this, people who log into doaj.org will be asked to login
# again on www.doaj.org
REMEMBER_COOKIE_DOMAIN = '.' + DOMAIN


# FIXME: something like this required for hierarchical roles, but not yet needed
#ROLE_MAP = {
#    "admin" : {"publisher", "create_user"}
#}

# ========================
# MAPPING SETTINGS

# a dict of the ES mappings. identify by name, and include name as first object name
# and identifier for how non-analyzed fields for faceting are differentiated in the mappings
FACET_FIELD = ".exact"
MAPPINGS = {
    "journal" : {
        "journal" : {
            "dynamic_templates" : [
                {
                    "default" : {
                        "match" : "*",
                        "match_mapping_type": "string",
                        "mapping" : {
                            "type" : "multi_field",
                            "fields" : {
                                "{name}" : {"type" : "{dynamic_type}", "index" : "analyzed", "store" : "no"},
                                "exact" : {"type" : "{dynamic_type}", "index" : "not_analyzed", "store" : "yes"}
                            }
                        }
                    }
                }
            ]
        }
    }
}
MAPPINGS['account'] = {'account':MAPPINGS['journal']['journal']}
MAPPINGS['article'] = {'article':MAPPINGS['journal']['journal']}
MAPPINGS['suggestion'] = {'suggestion':MAPPINGS['journal']['journal']}
MAPPINGS['upload'] = {'upload':MAPPINGS['journal']['journal']}
MAPPINGS['cache'] = {'cache':MAPPINGS['journal']['journal']}

# ========================
# QUERY SETTINGS

# list index types that should not be queryable via the query endpoint
NO_QUERY = ['account']

# list additional terms to impose on anonymous users of query endpoint
# for each index type that you wish to have some
# must be a list of objects that can be appended to an ES query.bool.must
# for example [{'term':{'visible':True}},{'term':{'accessible':True}}]
ANONYMOUS_SEARCH_TERMS = {
    # "pages": [{'term':{'visible':True}},{'term':{'accessible':True}}]
}

# a default sort to apply to query endpoint searches
# for each index type that you wish to have one
# for example {'created_date' + FACET_FIELD : {"order":"desc"}}
DEFAULT_SORT = {
    # "pages": {'created_date' + FACET_FIELD : {"order":"desc"}}
}

# ========================
# MEDIA SETTINGS

# location of media storage folder
MEDIA_FOLDER = "media"

# ========================
# PAGEMANAGER SETTINGS

# folder name for storing page content
# will be added under the templates/pagemanager route
CONTENT_FOLDER = "content"

# etherpad endpoint if available for collaborative editing
COLLABORATIVE = 'http://localhost:9001'

# when a page is deleted from the index should it also be removed from
# filesystem and etherpad (if they are available in the first place)
DELETE_REMOVES_FS = False  # True / False
DELETE_REMOVES_EP = False  # MUST BE THE ETHERPAD API-KEY OR DELETES WILL FAIL

# disqus account shortname if available for page comments
COMMENTS = ''

# ========================
# HOOK SETTINGS

REPOS = {
    "portality": {
        "path": "/opt/portality/src/portality"
    },
    "content": {
        "path": "/opt/portality/src/portality/portality/templates/pagemanager/content"
    }
}

# ========================
# FEED SETTINGS

FEED_TITLE = "Directory of Open Access Journals"

BASE_URL = "http://doaj.org"

# Maximum number of feed entries to be given in a single response. If this is omitted, it will
# default to 20
MAX_FEED_ENTRIES = 100

# Maximum age of feed entries (in seconds) (default value here is 30 days).
MAX_FEED_ENTRY_AGE = 2592000

# NOT USED IN THIS IMPLEMENTATION
# Which index to run feeds from
#FEED_INDEX = "journal"

# Licensing terms for feed content
FEED_LICENCE = "(c) DOAJ 2013. CC-BY-SA."

# name of the feed generator (goes in the atom:generator element)
FEED_GENERATOR = "CottageLabs feed generator"

# Larger image to use as the logo for all of the feeds
FEED_LOGO = "http://www.doaj.org/static/doaj/images/favicon.ico"

# ============================
# OAI-PMH SETTINGS

OAIPMH_METADATA_FORMATS = [
    {
        "metadataPrefix" : "oai_dc",
        "schema" : "http://www.openarchives.org/OAI/2.0/oai_dc.xsd",
        "metadataNamespace" : "http://www.openarchives.org/OAI/2.0/oai_dc/"
    }
]

OAIPMH_IDENTIFIER_NAMESPACE = "doaj.org"

OAIPMH_LIST_RECORDS_PAGE_SIZE = 100

OAIPMH_LIST_IDENTIFIERS_PAGE_SIZE = 300

OAIPMH_RESUMPTION_TOKEN_EXPIRY = 86400

# =================================
# File Upload settings

UPLOAD_DIR = "upload"

# =================================
# ReCaptcha settings
# We use per-domain, not global keys
RECAPTCHA_PUBLIC_KEY = '6LdaE-wSAAAAAKTofjeh5Zn94LN1zxzbrhxE8Zxr'
# RECAPTCHA_PRIVATE_KEY is set in secret_settings.py which should not be
# committed to the repository, but only held locally and on the server
# (transfer using scp).

# =================================
# Cache settings
# number of seconds site statistics should be considered fresh
# 1800s = 30mins
SITE_STATISTICS_TIMEOUT = 1800
Python
0
@@ -1190,190 +1190,8 @@ n%22%0A%0A -# remember people are logged in across subdomains%0A# without this, people who log into doaj.org will be asked to login%0A# again on www.doaj.org%0AREMEMBER_COOKIE_DOMAIN = '.' + DOMAIN%0A%0A%0A # FI
bce19fd89fc82f2d18bd1cc210d94255800a2d5c
Use relative import for Python 3 support
molo/commenting/admin_views.py
molo/commenting/admin_views.py
from django.contrib import messages
from django.shortcuts import redirect
from django.views.generic import FormView

from django_comments.views.comments import post_comment

from molo.commenting.forms import AdminMoloCommentReplyForm
from tasks import send_export_email

from wagtail.contrib.modeladmin.views import IndexView


class MoloCommentsAdminView(IndexView):
    def send_export_email_to_celery(self, email, arguments):
        send_export_email.delay(email, arguments)

    def post(self, request, *args, **kwargs):
        if not request.user.email:
            messages.error(
                request, (
                    "Your email address is not configured. "
                    "Please update it before exporting."))
            return redirect(request.path)

        drf__submit_date__gte = request.GET.get('drf__submit_date__gte')
        drf__submit_date__lte = request.GET.get('drf__submit_date__lte')
        is_staff = request.GET.get('user__is_staff__exact')
        is_removed__exact = request.GET.get('is_removed__exact')

        filter_list = {
            'submit_date__range': (drf__submit_date__gte,
                                   drf__submit_date__lte)
            if drf__submit_date__gte and drf__submit_date__lte else None,
            'is_removed': is_removed__exact,
            'user__is_staff': is_staff
        }

        arguments = {'wagtail_site': request.site.pk}
        for key, value in filter_list.items():
            if value:
                arguments[key] = value

        self.send_export_email_to_celery(request.user.email, arguments)
        messages.success(request, (
            "CSV emailed to '{0}'").format(request.user.email))
        return redirect(request.path)

    def get_template_names(self):
        return 'admin/molo_comments_admin.html'


class MoloCommentsAdminReplyView(FormView):
    form_class = AdminMoloCommentReplyForm
    template_name = 'admin/molo_comments_admin_reply.html'

    def get_form_kwargs(self):
        kwargs = super(MoloCommentsAdminReplyView, self).get_form_kwargs()
        kwargs['parent'] = self.kwargs['parent']
        return kwargs

    def form_valid(self, form):
        self.request.POST = self.request.POST.copy()
        self.request.POST['name'] = ''
        self.request.POST['url'] = ''
        self.request.POST['email'] = ''
        self.request.POST['parent'] = self.kwargs['parent']
        post_comment(self.request)
        messages.success(self.request, ('Reply successfully created.'))
        return redirect('/admin/commenting/molocomment/')
Python
0
@@ -1,20 +1,58 @@ +from .tasks import send_export_email%0A%0A from django.contrib
@@ -267,44 +267,8 @@ orm%0A -from tasks import send_export_email%0A from
96f9e225a8b490bd93e016ed42dfb133290a47f5
add test
molo/core/tests/test_search.py
molo/core/tests/test_search.py
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test.client import Client

from wagtail.wagtailsearch.backends import get_search_backend

from molo.core.models import SiteLanguageRelation, \
    Main, Languages
from molo.core.tests.base import MoloTestCaseMixin


class TestSearch(TestCase, MoloTestCaseMixin):

    def setUp(self):
        self.client = Client()
        # Creates Main language
        self.mk_main()
        main = Main.objects.all().first()
        self.english = SiteLanguageRelation.objects.create(
            language_setting=Languages.for_site(main.get_site()),
            locale='en',
            is_active=True)
        self.french = SiteLanguageRelation.objects.create(
            language_setting=Languages.for_site(main.get_site()),
            locale='fr',
            is_active=True)

        # Creates a section under the index page
        self.english_section = self.mk_section(
            self.section_index, title='English section')

        self.mk_main2()
        self.main2 = Main.objects.all().last()
        self.language_setting2 = Languages.objects.create(
            site_id=self.main2.get_site().pk)
        self.english2 = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting2,
            locale='en',
            is_active=True)
        self.spanish = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting2,
            locale='es',
            is_active=True)
        self.yourmind2 = self.mk_section(
            self.section_index2, title='Your mind2')
        self.yourmind_sub2 = self.mk_section(
            self.yourmind2, title='Your mind subsection2')

    def test_search(self):
        self.backend = get_search_backend('default')
        self.backend.reset_index()
        self.mk_articles(self.english_section, count=20)
        self.backend.refresh_index()

        response = self.client.get(reverse('search'), {
            'q': 'Test'
        })
        self.assertContains(response, 'Page 1 of 2')
        self.assertContains(response, '&rarr;')
        self.assertNotContains(response, '&larr;')

        response = self.client.get(reverse('search'), {
            'q': 'Test',
            'p': '2',
        })
        self.assertContains(response, 'Page 2 of 2')
        self.assertNotContains(response, '&rarr;')
        self.assertContains(response, '&larr;')

        response = self.client.get(reverse('search'), {
            'q': 'Test',
            'p': 'foo',
        })
        self.assertContains(response, 'Page 1 of 2')

        response = self.client.get(reverse('search'), {
            'q': 'Test',
            'p': '4',
        })
        self.assertContains(response, 'Page 2 of 2')

        response = self.client.get(reverse('search'), {
            'q': 'magic'
        })
        self.assertContains(response, 'No search results for magic')

        response = self.client.get(reverse('search'))
        self.assertContains(response, 'No search results for None')

    def test_search_works_with_multisite(self):
        self.backend = get_search_backend('default')
        self.backend.reset_index()
        self.mk_article(
            self.english_section, title="Site 1 article")

        self.mk_article(
            self.yourmind2, title="Site 2 article")
        self.backend.refresh_index()

        response = self.client.get(reverse('search'), {
            'q': 'article'
        })
        self.assertContains(response, 'Site 1 article')
        self.assertNotContains(response, 'Site 2 article')

        client = Client(HTTP_HOST=self.site2.hostname)
        response = client.get(reverse('search'), {
            'q': 'article'
        })
        self.assertNotContains(response, 'Site 1 article')
        self.assertContains(response, 'Site 2 article')

        response = self.client.get(reverse('search'), {
            'q': 'magic'
        })
        self.assertContains(response, 'No search results for magic')

        response = self.client.get(reverse('search'))
        self.assertContains(response, 'No search results for None')

    def test_search_works_with_multilanguages(self):
        self.backend = get_search_backend('default')
        self.backend.reset_index()
        eng_article = self.mk_article(
            self.english_section, title="English article")
        self.mk_article_translation(
            eng_article,
            self.french, title='French article')
        self.backend.refresh_index()

        self.client.get('/locale/en/')
        response = self.client.get(reverse('search'), {
            'q': 'article'
        })
        self.assertContains(response, 'English article')
        self.assertNotContains(response, 'French article')

        self.client.get('/locale/fr/')
        response = self.client.get(reverse('search'), {
            'q': 'article'
        })
        self.assertContains(response, 'French article')
        self.assertNotContains(response, 'English article')
Python
0.000002
@@ -245,16 +245,28 @@ anguages +, FooterPage %0Afrom mo
@@ -1730,24 +1730,631 @@ section2')%0A%0A + def test_search_only_includes_articles(self):%0A self.backend = get_search_backend('default')%0A self.backend.reset_index()%0A self.mk_articles(self.english_section, count=2)%0A footer = FooterPage(title='Test Footer')%0A self.footer_index.add_child(instance=footer)%0A footer.save_revision().publish()%0A self.backend.refresh_index()%0A response = self.client.get(reverse('search'), %7B%0A 'q': 'Test'%0A %7D)%0A results = response.context%5B'results'%5D%0A for article in results:%0A self.assertNotEquals(article.title, 'Test Footer')%0A%0A def test
95e32115ec157d9764dd96b693bfe730929b76f2
Add shortcut imports
flask_email/__init__.py
flask_email/__init__.py
# -*- coding: utf-8 -*-
"""
    flask.ext.email
    ~~~~~~~~~~~~~

    Flask extension for sending email.
"""

__version__ = '1.4.3'

"""
Tools for sending email.
"""
from flask import current_app as app

from .utils import import_module

# Imported for backwards compatibility, and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from .utils import CachedDnsName, DNS_NAME
from .message import (
    EmailMessage, EmailMultiAlternatives,
    SafeMIMEText, SafeMIMEMultipart,
    DEFAULT_ATTACHMENT_MIME_TYPE, make_msgid,
    BadHeaderError, forbid_multi_line_headers)


def get_connection(backend=None, fail_silently=False, **kwargs):
    """
    Load an email backend and return an instance of it.

    If backend is None (default) EMAIL_BACKEND is used.

    Both fail_silently and other keyword arguments are used in the
    constructor of the backend.
    """
    path = backend or app.config.get('EMAIL_BACKEND', 'flask.ext.email.backends.locmem.Mail')
    try:
        mod_name, klass_name = path.rsplit('.', 1)
        mod = import_module(mod_name)
    except ImportError, e:
        raise Exception(('Error importing email backend module %s: "%s"'
                         % (mod_name, e)))
    try:
        klass = getattr(mod, klass_name)
    except AttributeError:
        raise Exception(('Module "%s" does not define a '
                         '"%s" class' % (mod_name, klass_name)))
    return klass(app, fail_silently=fail_silently, **kwargs)


def send_mail(subject, message, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              connection=None):
    """
    Easy wrapper for sending a single message to a recipient list. All members
    of the recipient list will see the other recipients in the 'To' field.

    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = connection or get_connection(username=auth_user,
                                              password=auth_password,
                                              fail_silently=fail_silently)
    return EmailMessage(subject, message, from_email, recipient_list,
                        connection=connection).send()


def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
                   auth_password=None, connection=None):
    """
    Given a datatuple of (subject, message, from_email, recipient_list), sends
    each message to each recipient list. Returns the number of emails sent.

    If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
    If auth_user and auth_password are set, they're used to log in.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = connection or get_connection(username=auth_user,
                                              password=auth_password,
                                              fail_silently=fail_silently)
    messages = [EmailMessage(subject, message, sender, recipient)
                for subject, message, sender, recipient in datatuple]
    return connection.send_messages(messages)


def mail_admins(subject, message, fail_silently=False, connection=None,
                html_message=None):
    """Sends a message to the admins, as defined by the ADMINS setting."""
    if not app.config.get('ADMINS', None):
        return
    mail = EmailMultiAlternatives(u'%s%s' % (app.config.get('EMAIL_SUBJECT_PREFIX', '[Flask] '), subject),
                                  message, app.config.get('SERVER_EMAIL', 'root@localhost'),
                                  [a[1] for a in app.config['ADMINS']], connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)


def mail_managers(subject, message, fail_silently=False, connection=None,
                  html_message=None):
    """Sends a message to the managers, as defined by the MANAGERS setting."""
    if not app.config.get('MANAGERS', None):
        return
    mail = EmailMultiAlternatives(u'%s%s' % (app.config.get('EMAIL_SUBJECT_PREFIX', '[Flask] '), subject),
                                  message, app.config.get('SERVER_EMAIL', 'root@localhost'),
                                  [a[1] for a in app.config['MANAGERS']], connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)
Python
0.000002
@@ -694,16 +694,302 @@ eaders)%0A +from .backends.console import Mail as ConsoleMail%0Afrom .backends.dummy import Mail as DummyMail%0Afrom .backends.filebased import Mail as FilebasedMail%0Afrom .backends.locmem import Mail as LocmemMail%0Afrom .backends.smtp import Mail as SMTPMail%0Afrom .backends.rest import Mail as RESTMail%0A %0A%0Adef ge
97a8a349d26b364e57aaac6f8d920770810aa8d8
Correct localized strings
src/sentry/constants.py
src/sentry/constants.py
""" sentry.constants ~~~~~~~~~~~~~~~~ These settings act as the default (base) settings for the Sentry-provided web-server :copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from django.utils.datastructures import SortedDict from django.utils.translation import ugettext_lazy as _ SORT_OPTIONS = SortedDict(( ('priority', _('Priority')), ('date', _('Last Seen')), ('new', _('First Seen')), ('freq', _('Frequency')), ('tottime', _('Total Time Spent')), ('avgtime', _('Average Time Spent')), ('accel_15', _('Trending: %(minutes)d minutes' % {'minutes': 15})), ('accel_60', _('Trending: %(minutes)d minutes' % {'minutes': 60})), )) SORT_CLAUSES = { 'priority': 'sentry_groupedmessage.score', 'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)', 'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)', 'freq': 'sentry_groupedmessage.times_seen', 'tottime': 'sentry_groupedmessage.time_spent_total', 'avgtime': '(sentry_groupedmessage.time_spent_total / sentry_groupedmessage.time_spent_count)', } SCORE_CLAUSES = SORT_CLAUSES.copy() SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy() SQLITE_SORT_CLAUSES.update({ 'date': 'sentry_groupedmessage.last_seen', 'new': 'sentry_groupedmessage.first_seen', }) SQLITE_SCORE_CLAUSES = SQLITE_SORT_CLAUSES.copy() MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy() MYSQL_SORT_CLAUSES.update({ 'date': 'sentry_groupedmessage.last_seen', 'new': 'sentry_groupedmessage.first_seen', }) MYSQL_SCORE_CLAUSES = SCORE_CLAUSES.copy() MYSQL_SCORE_CLAUSES.update({ 'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)', 'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)', }) SEARCH_SORT_OPTIONS = SortedDict(( ('score', _('Score')), ('date', _('Last Seen')), ('new', _('First Seen')), )) STATUS_UNRESOLVED = 0 STATUS_RESOLVED = 1 STATUS_MUTED = 2 STATUS_LEVELS = ( (STATUS_UNRESOLVED, _('unresolved')), (STATUS_RESOLVED, _('resolved')), (STATUS_MUTED, _('muted')), ) MEMBER_OWNER = 0 MEMBER_USER = 50 MEMBER_SYSTEM = 100 MEMBER_TYPES = ( (MEMBER_OWNER, _('admin')), (MEMBER_USER, _('user')), (MEMBER_SYSTEM, _('system agent')), )
Python
0.999887
@@ -1992,17 +1992,17 @@ VED, _(' -u +U nresolve
@@ -2032,17 +2032,17 @@ VED, _(' -r +R esolved'
@@ -2067,17 +2067,17 @@ TED, _(' -m +M uted')),
@@ -2173,17 +2173,17 @@ NER, _(' -a +A dmin')),
@@ -2204,17 +2204,17 @@ SER, _(' -u +U ser')),%0A
@@ -2240,16 +2240,16 @@ _(' -s +S ystem -a +A gent