Columns:
- commit: string, length 40
- subject: string, length 1 to 3.25k
- old_file: string, length 4 to 311
- new_file: string, length 4 to 311
- old_contents: string, length 0 to 26.3k
- lang: string, 3 classes
- proba: float64, range 0 to 1
- diff: string, length 0 to 7.82k
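The diff column appears to use the diff-match-patch patch text format: hunk headers with character offsets (`@@ -start,len +start,len @@`) over old_contents, context/insert/delete runs, and URI-encoded characters such as %0A for newlines. Assuming that, the sketch below shows how a row's updated file contents could be reconstructed; the `row` dict and the diff-match-patch dependency are illustrative assumptions, not part of the dump.

```python
# Minimal sketch, assuming the diff column is diff-match-patch patch text
# (pip install diff-match-patch). `row` is a hypothetical dict holding one
# record with the columns listed above; it is not defined by the dataset itself.
from diff_match_patch import diff_match_patch

def apply_row_diff(row):
    dmp = diff_match_patch()
    # Parse the URI-encoded patch text (e.g. %0A for newlines) into patch objects.
    patches = dmp.patch_fromText(row["diff"])
    # Apply the patches to the old file contents to recover the new contents.
    new_contents, results = dmp.patch_apply(patches, row["old_contents"])
    if not all(results):
        raise ValueError("patch did not apply cleanly for commit %s" % row["commit"])
    return new_contents
```

If the assumption holds, the returned text would correspond to the post-commit version of new_file; rows whose diffs do not apply cleanly can be skipped or logged rather than raising.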
4c547687662f7ea2a12d876291adb6e0bed85fc8
Fix database relationships
database.py
database.py
# # database.py # # set up and manage a database for storing data between sessions # from sqlalchemy import Column, ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker Base = declarative_base() class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String(250), nullable=False) class Server(Base): __tablename__ = 'servers' id = Column(Integer, primary_key=True) name = Column(String(250), nullable=False) owner = Column(Integer, ForeignKey('users.id')) class Role(Base): __tablename__ = 'roles' id = Column(Integer, primary_key=True) name = Column(String(250), nullable=False) server = Column(Integer, ForeignKey('servers.id')) class Channel(Base): __tablename__ = 'channels' id = Column(Integer, primary_key=True) name = Column(String(250), nullable=False) server = Column(Integer, ForeignKey('servers.id')) squelch = Column(Boolean, nullable=False) class CommandClass(Base): __tablename__ = 'commandclasses' id = Column(Integer, primary_key=True) name = Column(String(250), nullable=False) class Command(Base): __tablename__ = 'commands' id = Column(Integer, primary_key=True) name = Column(String(250), nullable=False) squelch = Column(Boolean, nullable=False) command_class = Column(Integer, ForeignKey('commandclasses.id')) class RoleCommandAccess(Base): __tablename__ = 'rolecommands' id = Column(Integer, primary_key=True) role = Column(Integer, ForeignKey('roles.id')) command = Column(Integer, ForeignKey('commands.id')) squelch = Column(Boolean, nullable=False) class RoleCommandClassAccess(Base): __tablename__ = 'rolecommandclasses' id = Column(Integer, primary_key=True) role = Column(Integer, ForeignKey('roles.id')) command_class = Column(Integer, ForeignKey('commandclasses.id')) squelch = Column(Boolean, nullable=False) class UserCommandAccess(Base): __tablename__ = 'usercommands' id = Column(Integer, primary_key=True) user = Column(Integer, ForeignKey('users.id')) command = Column(Integer, ForeignKey('commands.id')) squelch = Column(Boolean, nullable=False) class UserCommandClassAccess(Base): __tablename__ = 'usercommandclasses' id = Column(Integer, primary_key=True) user = Column(Integer, ForeignKey('users.id')) command_class = Column(Integer, ForeignKey('commandclasses.id')) squelch = Column(Boolean, nullable=False) # Create an engine that stores data in the local directory's # sqlalchemy_example.db file. engine = create_engine('sqlite:///susumu_takuan.db') # Create all tables in the engine. This is equivalent to "Create Table" # statements in raw SQL. Base.metadata.create_all(engine) DBSession = sessionmaker(bind=engine)
Python
0.000003
@@ -634,16 +634,19 @@ owner +.id = Colum @@ -671,32 +671,97 @@ Key('users.id')) +%0A owner = relationship(User, backref('servers', uselist=True)) %0A%0Aclass Role(Bas @@ -884,32 +884,35 @@ alse)%0A server +.id = Column(Intege @@ -935,24 +935,90 @@ ervers.id')) +%0A server = relationship(Server, backref('roles', uselist=True)) %0A%0Aclass Chan @@ -1159,16 +1159,19 @@ server +.id = Colum @@ -1203,24 +1203,90 @@ rvers.id'))%0A + server = relationship(Server, backref('roles', uselist=True))%0A squelch @@ -1671,32 +1671,35 @@ command_class +.id = Column(Intege @@ -1733,16 +1733,98 @@ es.id')) +%0A command_class = relationship(CommandClass, backref('commands', uselist=True)) %0A%0Aclass @@ -1968,32 +1968,35 @@ s.id'))%0A%09command +.id = Column(Intege @@ -2017,32 +2017,104 @@ 'commands.id'))%0A +%09command = relationship(Command, backref('rolecommands', uselist=True))%0A %09squelch = Colum @@ -2309,32 +2309,35 @@ )%0A%09command_class +.id = Column(Intege @@ -2364,32 +2364,114 @@ ndclasses.id'))%0A + command_class = relationship(CommandClass, backref('commands', uselist=True))%0A %09squelch = Colum @@ -2657,16 +2657,19 @@ %09command +.id = Colum @@ -2698,32 +2698,104 @@ 'commands.id'))%0A +%09command = relationship(Command, backref('rolecommands', uselist=True))%0A %09squelch = Colum @@ -2998,16 +2998,19 @@ nd_class +.id = Colum @@ -3053,16 +3053,98 @@ s.id'))%0A + command_class = relationship(CommandClass, backref('commands', uselist=True))%0A %09squelch
3b31d4538b3bd0ac3ee0708b5e67c2ecb6623c09
Allow merging when having duplicated keys
systrace/systrace/output_generator.py
systrace/systrace/output_generator.py
#!/usr/bin/env python # Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import base64 import gzip import json import os import StringIO from systrace import tracing_controller from systrace import trace_result # TODO(alexandermont): Current version of trace viewer does not support # the controller tracing agent output. Thus we use this variable to # suppress this tracing agent's output. This should be removed once # trace viewer is working again. OUTPUT_CONTROLLER_TRACE_ = False CONTROLLER_TRACE_DATA_KEY = 'controllerTraceDataKey' def GenerateHTMLOutput(trace_results, output_file_name): """Write the results of systrace to an HTML file. Args: trace_results: A list of TraceResults. output_file_name: The name of the HTML file that the trace viewer results should be written to. """ def _ReadAsset(src_dir, filename): return open(os.path.join(src_dir, filename)).read() systrace_dir = os.path.abspath(os.path.dirname(__file__)) try: from systrace import update_systrace_trace_viewer except ImportError: pass else: update_systrace_trace_viewer.update() trace_viewer_html = _ReadAsset(systrace_dir, 'systrace_trace_viewer.html') # Open the file in binary mode to prevent python from changing the # line endings, then write the prefix. systrace_dir = os.path.abspath(os.path.dirname(__file__)) html_prefix = _ReadAsset(systrace_dir, 'prefix.html') html_suffix = _ReadAsset(systrace_dir, 'suffix.html') trace_viewer_html = _ReadAsset(systrace_dir, 'systrace_trace_viewer.html') # Open the file in binary mode to prevent python from changing the # line endings, then write the prefix. html_file = open(output_file_name, 'wb') html_file.write(html_prefix.replace('{{SYSTRACE_TRACE_VIEWER_HTML}}', trace_viewer_html)) # Write the trace data itself. There is a separate section of the form # <script class="trace-data" type="application/text"> ... </script> # for each tracing agent (including the controller tracing agent). html_file.write('<!-- BEGIN TRACE -->\n') for result in trace_results: if (result.source_name == tracing_controller.TRACE_DATA_CONTROLLER_NAME and not OUTPUT_CONTROLLER_TRACE_): continue html_file.write(' <script class="trace-data" type="application/text">\n') html_file.write(_ConvertToHtmlString(result.raw_data)) html_file.write(' </script>\n') html_file.write('<!-- END TRACE -->\n') # Write the suffix and finish. html_file.write(html_suffix) html_file.close() final_path = os.path.abspath(output_file_name) return final_path def _ConvertToHtmlString(result): """Convert a trace result to the format to be output into HTML. If the trace result is a dictionary or list, JSON-encode it. If the trace result is a string, leave it unchanged. """ if isinstance(result, dict) or isinstance(result, list): return json.dumps(result) elif isinstance(result, str): return result else: raise ValueError('Invalid trace result format for HTML output') def GenerateJSONOutput(trace_results, output_file_name): """Write the results of systrace to a JSON file. Args: trace_results: A list of TraceResults. output_file_name: The name of the JSON file that the trace viewer results should be written to. 
""" results = _ConvertTraceListToDictionary(trace_results) results[CONTROLLER_TRACE_DATA_KEY] = ( tracing_controller.TRACE_DATA_CONTROLLER_NAME) if not OUTPUT_CONTROLLER_TRACE_: results[tracing_controller.TRACE_DATA_CONTROLLER_NAME] = [] with open(output_file_name, 'w') as json_file: json.dump(results, json_file) final_path = os.path.abspath(output_file_name) return final_path def MergeTraceResultsIfNeeded(trace_results): """Merge a list of trace data, if possible. This function can take any list of trace data, but it will only merge the JSON data (since that's all we can merge). Args: trace_results: A list of TraceResults containing trace data. """ if len(trace_results) <= 1: return trace_results merge_candidates = [] for result in trace_results: # Try to detect a JSON file cheaply since that's all we can merge. if result.raw_data[0] != '{': continue try: json_data = json.loads(result.raw_data) except ValueError: continue merge_candidates.append(trace_result.TraceResult(result.source_name, json_data)) if len(merge_candidates) <= 1: return trace_results other_results = [r for r in trace_results if not r.source_name in [c.source_name for c in merge_candidates]] merged_data = merge_candidates[0].raw_data for candidate in merge_candidates[1:]: json_data = candidate.raw_data for key, value in json_data.items(): if not str(key) in merged_data or str(key) in json_data: merged_data[str(key)] = value return ([trace_result.TraceResult('merged-data', json.dumps(merged_data))] + other_results) def _EncodeTraceData(trace_string): compressed_trace = StringIO.StringIO() with gzip.GzipFile(fileobj=compressed_trace, mode='w') as f: f.write(trace_string) b64_content = base64.b64encode(compressed_trace.getvalue()) return b64_content def _ConvertTraceListToDictionary(trace_list): trace_dict = {} for trace in trace_list: trace_dict[trace.source_name] = trace.raw_data return trace_dict
Python
0
@@ -5086,29 +5086,33 @@ or -str(key) in json_data +not merged_data%5Bstr(key)%5D :%0A
d017c2a2e09d043caecd555217a399453c7e60b8
fix migration imports
eventstore/migrations/0050_askfeedback.py
eventstore/migrations/0050_askfeedback.py
# Generated by Django 2.2.24 on 2021-12-07 06:26 import django.contrib.postgres.fields.jsonb from django.db import migrations, models import django.utils.timezone import uuid class Migration(migrations.Migration): dependencies = [("eventstore", "0049_auto_20211202_1220")] operations = [ migrations.CreateModel( name="AskFeedback", fields=[ ( "id", models.UUIDField( default=uuid.uuid4, editable=False, primary_key=True, serialize=False, ), ), ("contact_id", models.UUIDField()), ("question_answered", models.BooleanField(default=False)), ("timestamp", models.DateTimeField(default=django.utils.timezone.now)), ( "created_by", models.CharField(blank=True, default="", max_length=255), ), ( "data", django.contrib.postgres.fields.jsonb.JSONField( blank=True, default=dict, null=True ), ), ], ) ]
Python
0.000001
@@ -43,16 +43,29 @@ 06:26%0A%0A +import uuid%0A%0A import d @@ -100,16 +100,45 @@ s.jsonb%0A +import django.utils.timezone%0A from dja @@ -174,49 +174,8 @@ els%0A -import django.utils.timezone%0Aimport uuid%0A %0A%0Acl
daed646ff987bc86b333a995bac1283360a583ef
bump up version to 0.1.2
src/javactl/__init__.py
src/javactl/__init__.py
__version__ = '0.1.1'
Python
0.000007
@@ -12,11 +12,11 @@ = '0.1. -1 +2 '%0A
fce501b446d2a4133a244f86653bdc683f4f03de
test project manager using initial DB & validation code added
buildbuild/projects/tests/test_project_manager.py
buildbuild/projects/tests/test_project_manager.py
from django.test import TestCase from projects.models import Project from teams.models import Team from django.db import IntegrityError from django.core.exceptions import ValidationError class TestProjectName(TestCase): def setUp(self): self.name = "test_project_name" self.second_name = "test_second_project_name" self.invalid_long_length_name = "a" * 65 self.team_name = "test_team_name" self.lang = "python" self.ver = "2.7.8" self.project = Project.objects.create_project( name = self.name, ) self.second_project = Project.objects.create_project( name = self.second_name, ) def test_create_project_must_contain_name(self): self.assertRaises( TypeError, Project.objects.create_project, team_name = self.team_name, properties = ('python','2.7.8') ) def test_create_project_name_min_length_1(self): try: project = Project.objects.create_project( name = "" ) except ValidationError: pass def test_project_name_max_length_64(self): try: Project.objects.create_project( name = self.invalid_long_length_name, ) except ValidationError: pass def test_get_all_projects(self): projects = Project.objects.all() self.assertEqual(projects[0].name, self.project.name) self.assertEqual(projects[1].name, self.second_project.name) # Integrity def test_project_should_have_unique_name(self): try: Project.objects.create_project( name = self.name, ) except IntegrityError: pass # Assert def test_get_project_equal_to_project_targetted(self): get_project = Project.objects.get_project(self.project.id) self.assertEqual( self.project, get_project, "get_project should be equal to target project", )
Python
0
@@ -214,16 +214,56 @@ tCase):%0A + fixtures = %5B'properties_data.yaml'%5D%0A def @@ -477,16 +477,23 @@ elf.lang +_python = %22pyth @@ -512,16 +512,27 @@ self.ver +_python_278 = %222.7. @@ -961,26 +961,48 @@ s = -('python','2.7.8') +%7Bself.lang_python : self.ver_python_278%7D %0A @@ -1074,34 +1074,67 @@ -try:%0A project = +self.assertRaises(%0A ValidationError,%0A Pro @@ -1152,38 +1152,34 @@ s.create_project -(%0A +,%0A name @@ -1183,21 +1183,18 @@ ame = %22%22 +, %0A - @@ -1198,57 +1198,8 @@ ) -%0A except ValidationError:%0A pass %0A%0A @@ -1255,20 +1255,55 @@ -try: +self.assertRaises(%0A ValidationError, %0A @@ -1333,38 +1333,34 @@ s.create_project -(%0A +,%0A name @@ -1405,62 +1405,9 @@ - )%0A except ValidationError:%0A pass +) %0A%0A @@ -1618,20 +1618,8 @@ e)%0A%0A -# Integrity%0A @@ -1627,24 +1627,30 @@ ef test_ +check_ project_ should_h @@ -1645,20 +1645,8 @@ ect_ -should_have_ uniq @@ -1672,13 +1672,56 @@ -try:%0A +self.assertRaises(%0A IntegrityError,%0A @@ -1749,38 +1749,34 @@ s.create_project -(%0A +,%0A name @@ -1802,71 +1802,10 @@ - - )%0A except IntegrityError:%0A pass%0A%0A# Assert +)%0A %0A @@ -2085,24 +2085,317 @@ ,%0A )%0A +%0A + def test_properties_field_must_dict(self):%0A self.assertRaises(%0A TypeError,%0A Project.objects.create_project,%0A name = self.project.name,%0A team_name = self.team_name,%0A properties = (self.lang_python, self.ver_python_278)%0A )%0A %0A
be778b351e6b6af18a786265851142a1b9dd420a
remove erroneous quotes in isinstance()
networkx/classes/labeledgraph.py
networkx/classes/labeledgraph.py
from graph import Graph from digraph import DiGraph from networkx.exception import NetworkXException, NetworkXError import networkx.convert as convert class LabeledGraph(Graph): def __init__(self, data=None, name='', weighted=True): super(LabeledGraph,self).__init__(data,name,weighted) # node labels if hasattr(data,'label') and isinstance(data.label,'dict'): self.label=data.label.copy() else: self.label = {} def add_node(self, n, data=None): super(LabeledGraph,self).add_node(n) if data is not None: self.label[n]=data def add_nodes_from(self, nbunch, data=None): for nd in nbunch: try: n,data=nd except (TypeError,ValueError): n=nd data=None self.add_node(n,data) def remove_node(self, n): super(LabeledGraph,self).remove_node(n) try: del self.label[n] except KeyError: pass def remove_nodes_from(self, nbunch): for n in nbunch: self.remove_node(n) def nodes_iter(self, nbunch=None, data=False): if nbunch is None: nbunch=self.adj.iterkeys() else: nbunch=self.nbunch_iter(nbunch) if data: for n in nbunch: data=self.label.get(n,None) yield (n,data) else: for n in nbunch: yield n def nodes(self, nbunch=None, data=False): if data: return dict(self.nodes_iter(nbunch,data)) else: return list(self.nodes_iter(nbunch)) def get_node(self, n): if n not in self.adj: raise NetworkXError("node %s not in graph"%(n,)) else: data=self.label.get(n,None) return data def clear(self): super(LabeledGraph,self).clear() self.label={} def subgraph(self, nbunch, copy=True): H=super(LabeledGraph,self).subgraph(nbunch, copy) H.label=dict( (k,v) for k,v in self.label.items() if k in H) return H def to_directed(self): H=super(LabeledGraph,self).to_directed() H.label=dict( (k,v) for k,v in self.label.items() if k in H) return H class LabeledDiGraph(LabeledGraph,DiGraph): pass # just use the inherited classes
Python
0.000042
@@ -378,14 +378,12 @@ bel, -' dict -' ):%0A
490b888f052476ddd667e4ccd609618b4160953b
Add the vulcan-mgmt VLAN type
lib/python2.6/aquilon/aqdb/model/vlan.py
lib/python2.6/aquilon/aqdb/model/vlan.py
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2010,2011,2012,2013 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The classes pertaining to VLAN info""" from datetime import datetime from sqlalchemy import (Column, Integer, DateTime, ForeignKey, CheckConstraint, UniqueConstraint) from sqlalchemy.orm import relation, backref, deferred, object_session from sqlalchemy.sql import func, and_ from aquilon.exceptions_ import NotFoundException, InternalError from aquilon.aqdb.column_types import AqStr, Enum from aquilon.aqdb.model import Base, Network, Switch MAX_VLANS = 4096 # IEEE 802.1Q standard VLAN_TYPES = ('storage', 'vmotion', 'user', 'unknown') _VTN = 'vlan_info' class VlanInfo(Base): """ information regarding well-known/standardized vlans """ __tablename__ = _VTN _instance_label = 'vlan_id' vlan_id = Column(Integer, primary_key=True) port_group = Column(AqStr(32), nullable=False) vlan_type = Column(Enum(32, VLAN_TYPES), nullable=False) @classmethod def get_vlan_id(cls, session, port_group, compel=InternalError): info = session.query(cls).filter_by(port_group=port_group).first() if not info and compel: raise compel("No VLAN found for port group %s" % port_group) return info.vlan_id @classmethod def get_port_group(cls, session, vlan_id, compel=InternalError): info = session.query(cls).filter_by(vlan_id=vlan_id).first() if not info and compel: raise compel("No port group found for VLAN id %s" % vlan_id) return info.port_group def __repr__(self): return '<%s vlan_id=%s port_group=%s vlan_type=%s>' % ( self.__class__.__name__, self.vlan_id, self.port_group, self.vlan_type) vlaninfo = VlanInfo.__table__ # pylint: disable=C0103 vlaninfo.primary_key.name = '%s_pk' % _VTN vlaninfo.append_constraint( UniqueConstraint('port_group', name='%s_port_group_uk' % _VTN)) vlaninfo.info['unique_fields'] = ['port_group'] vlaninfo.info['extra_search_fields'] = ['vlan_id'] vlaninfo.append_constraint( CheckConstraint('vlan_id < %d' % MAX_VLANS, name='%s_max_vlan_id_ck' % _VTN)) vlaninfo.append_constraint( CheckConstraint('vlan_id >= 0', name='%s_min_vlan_id_ck' % _VTN)) _TN = 'observed_vlan' _ABV = 'obs_vlan' class ObservedVlan(Base): """ reports the observance of a vlan/network on a switch """ __tablename__ = 'observed_vlan' switch_id = Column(Integer, ForeignKey('switch.hardware_entity_id', ondelete='CASCADE', name='%s_hw_fk' % _ABV), primary_key=True) network_id = Column(Integer, ForeignKey('network.id', ondelete='CASCADE', name='%s_net_fk' % _ABV), primary_key=True) vlan_id = Column(Integer, ForeignKey('vlan_info.vlan_id', name='%s_vlan_fk' % _ABV), primary_key=True) creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) switch = relation(Switch, backref=backref('%ss' % _TN, cascade='delete', passive_deletes=True, order_by=[vlan_id])) network = relation(Network, backref=backref('%ss' % _TN, cascade='delete', passive_deletes=True, order_by=[vlan_id])) vlan = relation(VlanInfo, uselist=False, primaryjoin=vlan_id 
== VlanInfo.vlan_id, foreign_keys=[VlanInfo.vlan_id], viewonly=True) @property def port_group(self): if self.vlan: return self.vlan.port_group return None @property def vlan_type(self): if self.vlan: return self.vlan.vlan_type return None @property def guest_count(self): from aquilon.aqdb.model import (EsxCluster, Cluster, ClusterResource, Resource, VirtualMachine, Machine, HardwareEntity, Interface) session = object_session(self) q = session.query(func.count()) q = q.filter(and_( # Select VMs on clusters that belong to the given switch EsxCluster.switch_id == self.switch_id, Cluster.id == EsxCluster.esx_cluster_id, ClusterResource.cluster_id == Cluster.id, Resource.holder_id == ClusterResource.id, VirtualMachine.resource_id == Resource.id, Machine.machine_id == VirtualMachine.machine_id, # Select interfaces with the right port group HardwareEntity.id == Machine.machine_id, Interface.hardware_entity_id == HardwareEntity.id, Interface.port_group == VlanInfo.port_group, VlanInfo.vlan_id == self.vlan_id)) return q.scalar() @classmethod def get_network(cls, session, switch, vlan_id, compel=NotFoundException): q = session.query(cls).filter_by(switch=switch, vlan_id=vlan_id) nets = q.all() if not nets: raise compel("No network found for switch %s and VLAN %s" % (switch.fqdn, vlan_id)) if len(nets) > 1: raise InternalError("More than one network found for switch %s " "and VLAN %s" % (switch.fqdn, vlan_id)) return nets[0].network obsvlan = ObservedVlan.__table__ # pylint: disable=C0103 obsvlan.primary_key.name = '%s_pk' % _TN obsvlan.append_constraint( CheckConstraint('vlan_id < %d' % MAX_VLANS, name='%s_max_vlan_id_ck' % _TN)) obsvlan.append_constraint( CheckConstraint('vlan_id >= 0', name='%s_min_vlan_id_ck' % _TN))
Python
0.996919
@@ -1262,16 +1262,31 @@ unknown' +, 'vulcan-mgmt' )%0A%0A_VTN
8e6f080a55e91f14122c2a0a36cb15cf566769b4
use CommunicationChannel0 in AlphaTwirl.py
AlphaTwirl/AlphaTwirl.py
AlphaTwirl/AlphaTwirl.py
# Tai Sakuma <[email protected]> import argparse import sys import os import itertools from Configure import TableConfigCompleter from Configure import EventReaderCollectorAssociatorBuilder from HeppyResult import ComponentReaderComposite from HeppyResult import ComponentLoop from HeppyResult import HeppyResult from EventReader import EventReaderBundle from EventReader import EventReaderCollectorAssociator from EventReader import EventReaderCollectorAssociatorComposite from EventReader import EventLoopRunner from EventReader import MPEventLoopRunner from Concurrently import CommunicationChannel from ProgressBar import ProgressBar from ProgressBar import ProgressMonitor, BProgressMonitor, NullProgressMonitor from Counter import Counts try: from HeppyResult import BEventBuilder as EventBuilder except ImportError: pass ##__________________________________________________________________|| class ArgumentParser(argparse.ArgumentParser): def __init__(self, owner, *args, **kwargs): super(ArgumentParser, self).__init__(*args, **kwargs) self.owner = owner def parse_args(self, *args, **kwargs): args = super(ArgumentParser, self).parse_args(*args, **kwargs) self.owner.args = args return args ##__________________________________________________________________|| def buildEventLoopRunner(progressMonitor, communicationChannel, processes): if communicationChannel is None: # single process eventLoopRunner = EventLoopRunner(progressMonitor) else: eventLoopRunner = MPEventLoopRunner(communicationChannel) return eventLoopRunner ##__________________________________________________________________|| def createTreeReader(progressMonitor, communicationChannel, outDir, force, nevents, processes, analyzerName, fileName, treeName, tableConfigs, eventSelection): tableConfigCompleter = TableConfigCompleter(defaultCountsClass = Counts, defaultOutDir = outDir) tableConfigs = [tableConfigCompleter.complete(c) for c in tableConfigs] if not force: tableConfigs = [c for c in tableConfigs if c['outFile'] and not os.path.exists(c['outFilePath'])] tableCreatorBuilder = EventReaderCollectorAssociatorBuilder() tableCreators = EventReaderCollectorAssociatorComposite(progressMonitor.createReporter()) for tblcfg in tableConfigs: tableCreators.add(tableCreatorBuilder.build(tblcfg)) eventLoopRunner = buildEventLoopRunner(progressMonitor = progressMonitor, communicationChannel = communicationChannel, processes = processes) eventBuilder = EventBuilder(analyzerName, fileName, treeName, nevents) eventReaderBundle = EventReaderBundle(eventBuilder, eventLoopRunner, tableCreators, eventSelection = eventSelection) return eventReaderBundle ##__________________________________________________________________|| class AlphaTwirl(object): def __init__(self): self.args = None self.componentReaders = ComponentReaderComposite() self.treeReaderConfigs = [ ] def ArgumentParser(self, *args, **kwargs): parser = ArgumentParser(self, *args, **kwargs) parser = self._add_arguments(parser) return parser def _add_arguments(self, parser): parser.add_argument('-i', '--heppydir', default = '/Users/sakuma/work/cms/c150130_RA1_data/74X/MC/20150713_MC/20150713_SingleMu', action = 'store', help = "Heppy results dir") parser.add_argument("-p", "--processes", action = "store", default = None, type = int, help = "number of processes to run in parallel") parser.add_argument("-q", "--quiet", action = "store_true", default = False, help = "quiet mode") parser.add_argument('-o', '--outDir', default = 'tbl/out', action = 'store') parser.add_argument("-n", "--nevents", 
action = "store", default = -1, type = int, help = "maximum number of events to process for each component") parser.add_argument("-c", "--components", default = None, nargs = '*', help = "the list of components") parser.add_argument("--force", action = "store_true", default = False, dest="force", help = "recreate all output files") return parser def _create_CommunicationChannel_and_ProgressMonitor(self): self.progressBar = None if self.args.quiet else ProgressBar() if self.args.processes is None: self.progressMonitor = NullProgressMonitor() if self.args.quiet else ProgressMonitor(presentation = self.progressBar) self.communicationChannel = None else: self.progressMonitor = NullProgressMonitor() if self.args.quiet else BProgressMonitor(presentation = self.progressBar) self.communicationChannel = CommunicationChannel(self.args.processes, self.progressMonitor) def addComponentReader(self, reader): self.componentReaders.add(reader) def addTreeReader(self, analyzerName, fileName, treeName, tableConfigs, eventSelection = None): cfg = dict( analyzerName = analyzerName, fileName = fileName, treeName = treeName, tableConfigs = tableConfigs, eventSelection = eventSelection ) self.treeReaderConfigs.append(cfg) def run(self): if self.args is None: self.ArgumentParser().parse_args() self._create_CommunicationChannel_and_ProgressMonitor() for cfg in self.treeReaderConfigs: treeReader = createTreeReader( self.progressMonitor, self.communicationChannel, self.args.outDir, self.args.force, self.args.nevents, self.args.processes, **cfg) self.addComponentReader(treeReader) if self.progressMonitor is not None: self.progressMonitor.begin() if self.communicationChannel is not None: self.communicationChannel.begin() componentLoop = ComponentLoop(self.componentReaders) if self.args.components == ['all']: self.args.components = None heppyResult = HeppyResult(path = self.args.heppydir, componentNames = self.args.components) componentLoop(heppyResult.components()) if self.communicationChannel is not None: self.communicationChannel.end() if self.progressMonitor is not None: self.progressMonitor.end() ##__________________________________________________________________||
Python
0
@@ -597,16 +597,63 @@ Channel%0A +from Concurrently import CommunicationChannel0%0A from Pro @@ -680,16 +680,16 @@ ressBar%0A - from Pro @@ -1307,372 +1307,8 @@ gs%0A%0A -##__________________________________________________________________%7C%7C%0Adef buildEventLoopRunner(progressMonitor, communicationChannel, processes):%0A if communicationChannel is None: # single process%0A eventLoopRunner = EventLoopRunner(progressMonitor)%0A else:%0A eventLoopRunner = MPEventLoopRunner(communicationChannel)%0A return eventLoopRunner%0A%0A ##__ @@ -1461,19 +1461,8 @@ nts, - processes, ana @@ -2093,21 +2093,18 @@ unner = -build +MP EventLoo @@ -2115,109 +2115,28 @@ ner( -progressMonitor = progressMonitor, communicationChannel = communicationChannel, processes = processes +communicationChannel )%0A%0A @@ -3924,16 +3924,44 @@ is None + or self.args.processes == 0 :%0A @@ -4124,20 +4124,59 @@ annel = -None +CommunicationChannel0(self.progressMonitor) %0A @@ -5317,45 +5317,8 @@ ts,%0A - self.args.processes,%0A @@ -5469,50 +5469,8 @@ - if self.communicationChannel is not None: sel @@ -5785,57 +5785,15 @@ ())%0A + - if self.communicationChannel is not None: sel
c6453752f9630a760cd2b2508d9ba39413871d86
Update SensorMotorTest.py
04Dan/SensorMotorTest.py
04Dan/SensorMotorTest.py
import RPi.GPIO as GPIO GPIO.setmode(GPIO.BOARD) ##GPIO.setup(18, GPIO.OUT) servo ##GPIO.setup(22, GPIO.OUT) motor GPIO.setup(16, GPIO.IN) ##button try: while True: i = GPIO.input(16) print(i) delay(1000) except Keyboardinterupt: GPIO.cleanup()
Python
0
@@ -234,16 +234,17 @@ ardinter +r upt:%0A G
c3ecc4a06a212da11f52c9c0cd5c7b5c8d500516
Support -h/--help on createdb.py
createdb.py
createdb.py
#!/usr/bin/env python import sys import fedmsg.config import fmn.lib.models config = fedmsg.config.load_config() uri = config.get('fmn.sqlalchemy.uri') if not uri: raise ValueError("fmn.sqlalchemy.uri must be present") session = fmn.lib.models.init(uri, debug=True, create=True) if '--with-dev-data' in sys.argv: user1 = fmn.lib.models.User.get_or_create(session, username="ralph") user2 = fmn.lib.models.User.get_or_create(session, username="toshio") user3 = fmn.lib.models.User.get_or_create(session, username="toshio") context1 = fmn.lib.models.Context.create( session, name="irc", description="Internet Relay Chat", detail_name="irc nick", icon="user", placeholder="z3r0_c00l", ) context2 = fmn.lib.models.Context.create( session, name="email", description="Electronic Mail", detail_name="email address", icon="envelope", placeholder="[email protected]", ) context3 = fmn.lib.models.Context.create( session, name="gcm", description="Google Cloud Messaging", detail_name="registration id", icon="phone", placeholder="laksdjfasdlfkj183097falkfj109f" ) prefs1 = fmn.lib.models.Preference.create( session, user=user1, context=context1, detail_value="threebean", ) prefs2 = fmn.lib.models.Preference.create( session, user=user1, context=context2, detail_value="[email protected]", ) session.commit()
Python
0
@@ -219,16 +219,119 @@ sent%22)%0A%0A +if '-h' in sys.argv or '--help'in sys.argv:%0A print %22createdb.py %5B--with-dev-data%5D%22%0A sys.exit(0)%0A%0A session
29205582e07eaa8b28eea4b0691a9556d0999015
Remove unused LoginForm
src/keybar/web/forms.py
src/keybar/web/forms.py
from django.utils.translation import ugettext_lazy as _ from django.contrib import auth import floppyforms.__future__ as forms from keybar.models.user import User class RegisterForm(forms.ModelForm): name = forms.CharField(label=_('Your name'), widget=forms.TextInput( attrs={'placeholder': _('e.g Jorah Mormont')})) email = forms.EmailField(label=_('Email')) class Meta: model = User fields = ('name', 'email') class LoginForm(forms.Form): email = forms.EmailField(label=_('Email')) password = forms.CharField(label=_('Password'), widget=forms.PasswordInput) error_messages = { 'invalid_login': _('Please enter a correct email and password. ' 'Note that both fields may be case-sensitive.'), } def __init__(self, *args, **kwargs): super(LoginForm, self).__init__(*args, **kwargs) self.authenticated_user = None def clean(self): email = self.cleaned_data.get('email') password = self.cleaned_data.get('password') if email and password: self.authenticated_user = auth.authenticate(email=email, password=password) if self.authenticated_user is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login') return self.cleaned_data def get_user(self): return self.authenticated_user
Python
0.000001
@@ -461,1013 +461,4 @@ l')%0A -%0A%0Aclass LoginForm(forms.Form):%0A email = forms.EmailField(label=_('Email'))%0A password = forms.CharField(label=_('Password'), widget=forms.PasswordInput)%0A%0A error_messages = %7B%0A 'invalid_login': _('Please enter a correct email and password. '%0A 'Note that both fields may be case-sensitive.'),%0A %7D%0A%0A def __init__(self, *args, **kwargs):%0A super(LoginForm, self).__init__(*args, **kwargs)%0A self.authenticated_user = None%0A%0A def clean(self):%0A email = self.cleaned_data.get('email')%0A password = self.cleaned_data.get('password')%0A%0A if email and password:%0A self.authenticated_user = auth.authenticate(email=email, password=password)%0A%0A if self.authenticated_user is None:%0A raise forms.ValidationError(%0A self.error_messages%5B'invalid_login'%5D,%0A code='invalid_login')%0A%0A return self.cleaned_data%0A%0A def get_user(self):%0A return self.authenticated_user%0A
e139537de43ade6549c790332f41b586aed4f63c
Put generated .c files in intermediate dir so this test can be compatible with change to detect duplicate sources.
test/actions-multiple/src/actions.gyp
test/actions-multiple/src/actions.gyp
# Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { # Have a long string so that actions will exceed xp 512 character # command limit on xp. 'long_string': 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' 'abcdefghijklmnopqrstuvwxyz0123456789' }, 'targets': [ { 'target_name': 'multiple_action_target', 'type': 'none', 'actions': [ { 'action_name': 'action1', 'inputs': [ 'copy.py', 'input.txt', ], 'outputs': [ 'output1.txt', ], 'action': [ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)', ], # Allows the test to run without hermetic cygwin on windows. 'msvs_cygwin_shell': 0, }, { 'action_name': 'action2', 'inputs': [ 'copy.py', 'input.txt', ], 'outputs': [ 'output2.txt', ], 'action': [ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)', ], # Allows the test to run without hermetic cygwin on windows. 'msvs_cygwin_shell': 0, }, { 'action_name': 'action3', 'inputs': [ 'copy.py', 'input.txt', ], 'outputs': [ 'output3.txt', ], 'action': [ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)', ], # Allows the test to run without hermetic cygwin on windows. 'msvs_cygwin_shell': 0, }, { 'action_name': 'action4', 'inputs': [ 'copy.py', 'input.txt', ], 'outputs': [ 'output4.txt', ], 'action': [ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)', ], # Allows the test to run without hermetic cygwin on windows. 'msvs_cygwin_shell': 0, }, ], }, { 'target_name': 'multiple_action_source_filter', 'type': 'executable', 'sources': [ 'main.c', # TODO(bradnelson): add foo.c here once this issue is fixed: # http://code.google.com/p/gyp/issues/detail?id=175 ], 'actions': [ { 'action_name': 'action1', 'inputs': [ 'foo.c', 'filter.py', ], 'outputs': [ 'output1.c', ], 'process_outputs_as_sources': 1, 'action': [ 'python', 'filter.py', 'foo', 'bar', 'foo.c', '<(_outputs)', ], # Allows the test to run without hermetic cygwin on windows. 'msvs_cygwin_shell': 0, }, { 'action_name': 'action2', 'inputs': [ 'foo.c', 'filter.py', ], 'outputs': [ 'output2.c', ], 'process_outputs_as_sources': 1, 'action': [ 'python', 'filter.py', 'foo', 'car', 'foo.c', '<(_outputs)', ], # Allows the test to run without hermetic cygwin on windows. 'msvs_cygwin_shell': 0, }, { 'action_name': 'action3', 'inputs': [ 'foo.c', 'filter.py', ], 'outputs': [ 'output3.c', ], 'process_outputs_as_sources': 1, 'action': [ 'python', 'filter.py', 'foo', 'dar', 'foo.c', '<(_outputs)', ], # Allows the test to run without hermetic cygwin on windows. 'msvs_cygwin_shell': 0, }, { 'action_name': 'action4', 'inputs': [ 'foo.c', 'filter.py', ], 'outputs': [ 'output4.c', ], 'process_outputs_as_sources': 1, 'action': [ 'python', 'filter.py', 'foo', 'ear', 'foo.c', '<(_outputs)', ], # Allows the test to run without hermetic cygwin on windows. 'msvs_cygwin_shell': 0, }, ], }, ], }
Python
0.000001
@@ -3025,32 +3025,52 @@ %5B%0A ' +%3C(INTERMEDIATE_DIR)/ output1.c',%0A @@ -3061,24 +3061,24 @@ output1.c',%0A - %5D, @@ -3195,32 +3195,33 @@ ar', 'foo.c', '%3C +@ (_outputs)',%0A @@ -3501,32 +3501,52 @@ %5B%0A ' +%3C(INTERMEDIATE_DIR)/ output2.c',%0A @@ -3671,32 +3671,33 @@ ar', 'foo.c', '%3C +@ (_outputs)',%0A @@ -3977,32 +3977,52 @@ %5B%0A ' +%3C(INTERMEDIATE_DIR)/ output3.c',%0A @@ -4147,32 +4147,33 @@ ar', 'foo.c', '%3C +@ (_outputs)',%0A @@ -4453,32 +4453,52 @@ %5B%0A ' +%3C(INTERMEDIATE_DIR)/ output4.c',%0A @@ -4563,32 +4563,32 @@ 'action': %5B%0A - 'pyt @@ -4627,24 +4627,25 @@ 'foo.c', '%3C +@ (_outputs)',
e498d8e91cb421665ea7f28c7113a9fc33548d29
add timeout
test/functional/feature_asset_zdag.py
test/functional/feature_asset_zdag.py
#!/usr/bin/env python3 # Copyright (c) 2019-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import SyscoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, set_node_times, disconnect_nodes, connect_nodes, bump_node_times from test_framework.messages import COIN import time ZDAG_NOT_FOUND = -1 ZDAG_STATUS_OK = 0 ZDAG_WARNING_RBF = 1 ZDAG_WARNING_NOT_ZDAG_TX = 2 ZDAG_WARNING_SIZE_OVER_POLICY = 3 ZDAG_MAJOR_CONFLICT = 4 MAX_INITIAL_BROADCAST_DELAY = 15 * 60 # 15 minutes in seconds class AssetZDAGTest(SyscoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.rpc_timeout = 240 self.extra_args = [['-assetindex=1'],['-assetindex=1'],['-assetindex=1']] def run_test(self): self.nodes[0].generate(200) self.sync_blocks() self.basic_zdag_doublespend() def basic_zdag_doublespend(self): self.basic_asset() self.nodes[0].generate(1) newaddress2 = self.nodes[1].getnewaddress() newaddress3 = self.nodes[1].getnewaddress() newaddress1 = self.nodes[0].getnewaddress() self.nodes[2].importprivkey(self.nodes[1].dumpprivkey(newaddress2)) self.nodes[0].assetsend(self.asset, newaddress1, int(2*COIN)) # create 2 utxo's so below newaddress1 recipient of 0.5 COIN uses 1 and the newaddress3 recipient on node3 uses the other on dbl spend self.nodes[0].sendtoaddress(newaddress2, 1) self.nodes[0].sendtoaddress(newaddress2, 1) self.nodes[0].generate(1) self.sync_blocks() out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset': 0.5}) assert_equal(len(out), 1) out = self.nodes[2].listunspent() assert_equal(len(out), 2) # send 2 asset UTXOs to newaddress2 same logic as explained above about dbl spend self.nodes[0].assetallocationsend(self.asset, newaddress2, int(1*COIN)) self.nodes[0].assetallocationsend(self.asset, newaddress2, int(0.4*COIN)) self.nodes[0].generate(1) self.sync_blocks() # should have 2 sys utxos and 2 asset utxos out = self.nodes[2].listunspent() assert_equal(len(out), 4) # this will use 1 sys utxo and 1 asset utxo and send it to change address owned by node2 self.nodes[1].assetallocationsend(self.asset, newaddress1, int(0.3*COIN)) self.sync_mempools(timeout=30) # node3 should have 2 less utxos because they were sent to change on node2 out = self.nodes[2].listunspent(minconf=0) assert_equal(len(out), 2) # disconnect node 2 and 3 so they can double spend without seeing each others transaction disconnect_nodes(self.nodes[1], 2) tx1 = self.nodes[1].assetallocationsend(self.asset, newaddress1, int(1*COIN))['txid'] time.sleep(1) # dbl spend tx2 = self.nodes[2].assetallocationsend(self.asset, newaddress1, int(0.9*COIN))['txid'] # use tx2 to build tx3 tx3 = self.nodes[2].assetallocationsend(self.asset, newaddress1, int(0.05*COIN))['txid'] # use tx2 to build tx4 tx4 = self.nodes[2].assetallocationsend(self.asset, newaddress1, int(0.025*COIN))['txid'] connect_nodes(self.nodes[1], 2) # broadcast transactions bump_node_times(self.nodes, MAX_INITIAL_BROADCAST_DELAY) time.sleep(2) self.sync_mempools(timeout=30) for i in range(3): assert_equal(self.nodes[i].assetallocationverifyzdag(tx1)['status'], ZDAG_MAJOR_CONFLICT) # ensure the tx2 made it to mempool, should propogate dbl-spend first time assert_equal(self.nodes[i].assetallocationverifyzdag(tx2)['status'], ZDAG_MAJOR_CONFLICT) # will conflict because its using tx2 which is in conflict state 
assert_equal(self.nodes[i].assetallocationverifyzdag(tx3)['status'], ZDAG_MAJOR_CONFLICT) # will conflict because its using tx3 which uses tx2 which is in conflict state assert_equal(self.nodes[i].assetallocationverifyzdag(tx4)['status'], ZDAG_MAJOR_CONFLICT) self.nodes[0].generate(1) self.sync_blocks() for i in range(3): self.nodes[i].getrawtransaction(tx1) assert_equal(self.nodes[i].assetallocationverifyzdag(tx1)['status'], ZDAG_NOT_FOUND) assert_equal(self.nodes[i].assetallocationverifyzdag(tx2)['status'], ZDAG_NOT_FOUND) assert_equal(self.nodes[i].assetallocationverifyzdag(tx3)['status'], ZDAG_NOT_FOUND) assert_equal(self.nodes[i].assetallocationverifyzdag(tx4)['status'], ZDAG_NOT_FOUND) assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[i].getrawtransaction, tx2) assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[i].getrawtransaction, tx3) assert_raises_rpc_error(-5, 'No such mempool transaction', self.nodes[i].getrawtransaction, tx4) out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0,'maximumAmountAsset':0}) assert_equal(len(out), 1) out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0.3,'maximumAmountAsset':0.3}) assert_equal(len(out), 1) out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':0.6,'maximumAmountAsset':0.6}) assert_equal(len(out), 1) out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset, 'minimumAmountAsset':1.0,'maximumAmountAsset':1.0}) assert_equal(len(out), 1) out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset}) assert_equal(len(out), 4) def basic_asset(self): self.asset = self.nodes[0].assetnew('1', "TST", "asset description", "0x9f90b5093f35aeac5fbaeb591f9c9de8e2844a46", 8, 1000*COIN, 10000*COIN, 31, {})['asset_guid'] if __name__ == '__main__': AssetZDAGTest().main()
Python
0.000015
@@ -3572,16 +3572,18 @@ ST_DELAY ++1 )%0A
2626b5dbfe91a6b8fee7beab370e60a5a474c699
Add my implementation of kind()
CS212/Lesson-01/poker.py
CS212/Lesson-01/poker.py
# # In the first Lesson of the class we are attempting to # build a Poker program. # def poker(hands): "Return the best hand: poker([hand,...]) => hand" return max(hands, key=hand_rank) def hand_rank(hand): ranks = card_ranks(hand) if straight(ranks) and flush(hand): # straight flush return (8, max(ranks)) elif kind(4, ranks): # 4 of a kind return (7, kind(4, ranks), kind(1, ranks)) elif kind(3, ranks) and kind(2, ranks): # full house return (6, kind(3, ranks), kind(2, ranks)) elif flush(hand): # flush return (5, ranks) elif straight(ranks): # straight return (4, max(hand)) elif kind(3, ranks): # 3 of a kind return (3, kind(3, ranks), ranks) elif two_pair(ranks): # 2 pair return (2, two_pair(ranks)) elif kind(2, ranks): # kind return (1, ranks) else: # high card return (0, ranks) def card_ranks(cards): "Return a list of the ranks, sorted with higher first." RANK_MAP = dict(zip(["T", "J", "Q", "K", "A"], range(10, 15))) def rank_to_int(card): r, s = card if r in RANK_MAP: return RANK_MAP[r] else: return int(r) ranks = map(rank_to_int, cards) ranks.sort(reverse=True) return ranks def straight(ranks): "Return True if the ordered ranks form a 5-card straight." return (max(ranks) - min(ranks) == 4) and len(set(ranks)) == 5 def flush(hand): "Return True if all the cards have the same suit." suits = [s for r, s in hand] return len(set(suits)) == 1 def test(): "Test cases for the functions in poker program" sf = "6C 7C 8C 9C TC".split() # Straight Flush fk = "9D 9H 9S 9C 7D".split() # Four of a Kind fh = "TD TC TH 7C 7D".split() # Full House assert card_ranks(sf) == [10, 9, 8, 7, 6] assert card_ranks(fk) == [9, 9, 9, 9, 7] assert card_ranks(fh) == [10, 10, 10, 7, 7] assert poker([sf, fk, fh]) == sf assert poker([fk, fh]) == fk assert poker([fh, fh]) == fh assert poker([sf]) == sf assert poker([sf] + 99*[fh]) == sf assert hand_rank(sf) == (8, 10) assert hand_rank(fk) == (7, 9, 7) assert hand_rank(fh) == (6, 10, 7) return 'tests pass' print test()
Python
0
@@ -1787,16 +1787,339 @@ == 1%0A%0A%0A +def kind(n, ranks):%0A %22%22%22Return the first rank that this hand has exactly n of.%0A Return None if there is no n-of-a-kind in the hand.%22%22%22%0A%0A last = None%0A count = 0%0A%0A for rank in ranks:%0A if last != rank:%0A if count == n: return last%0A count = 0%0A last = rank%0A count += 1%0A%0A if count == n:%0A return last%0A%0A%0A def test @@ -2322,24 +2322,25 @@ Full House%0A +%0A assert c @@ -2470,77 +2470,71 @@ 7%5D%0A +%0A -assert poker(%5Bsf, fk, fh%5D) == sf%0A assert poker(%5Bfk, fh%5D +fkranks = card_ranks(fk)%0A%0A assert kind(4, fkranks ) == -fk +9 %0A @@ -2545,97 +2545,102 @@ ert -poker(%5Bfh, fh%5D) == fh%0A assert poker(%5Bsf%5D) == sf%0A assert poker(%5Bsf%5D + 99*%5Bfh%5D +kind(3, fkranks) is None%0A assert kind(2, fkranks) is None%0A assert kind(1, fkranks ) == -sf +7%0A %0A @@ -2745,24 +2745,199 @@ (6, 10, 7)%0A +%0A assert poker(%5Bsf, fk, fh%5D) == sf%0A assert poker(%5Bfk, fh%5D) == fk%0A assert poker(%5Bfh, fh%5D) == fh%0A assert poker(%5Bsf%5D) == sf%0A assert poker(%5Bsf%5D + 99 * %5Bfh%5D) == sf%0A%0A return '
8d8002062a0ecbf3720870d7561670a8c7e98da2
Fix test for auth tokens store
test/stores/test_auth_tokens_store.py
test/stores/test_auth_tokens_store.py
from test.base import ApiTestCase from zou.app.stores import auth_tokens_store class CommandsTestCase(ApiTestCase): def setUp(self): super(CommandsTestCase, self).setUp() self.store = auth_tokens_store self.store.clear() def tearDown(self): self.store.clear() def test_get_and_add(self): self.assertIsNone(self.store.get("key-1")) self.store.add("key-1", "true") self.assertEquals(self.store.get("key-1"), "true") def test_delete(self): self.store.add("key-1", "true") self.store.delete("key-1") self.assertIsNone(self.store.get("key-1")) def test_is_revoked(self): self.assertTrue(self.store.is_revoked({"jti": "key-1"})) self.store.add("key-1", "true") self.assertTrue(self.store.is_revoked({"jti": "key-1"})) self.store.add("key-1", "false") self.assertFalse(self.store.is_revoked({"jti": "key-1"})) def test_keys(self): self.store.add("key-1", "true") self.store.add("key-2", "true") self.assertEquals( self.store.keys(), ["key-1", "key-2"] )
Python
0.000002
@@ -1069,23 +1069,50 @@ f.assert -Equals( +True(%22key-1%22 in self.store.keys()) %0A @@ -1116,56 +1116,50 @@ - self. -store.keys(), %5B%22key-1%22, %22key-2%22%5D%0A +assertTrue(%22key-2%22 in self.store.keys() )%0A
7f43dfc790e9f7d18a31513ab739391b264fa12d
Fix new version check
testcases/cloud_admin/upgrade_euca.py
testcases/cloud_admin/upgrade_euca.py
#!/usr/bin/env python # # # Description: This script upgrades a Eucalyptus cloud import re from eucaops import Eucaops from eutester.euservice import Euservice from eutester.eutestcase import EutesterTestCase class Upgrade(EutesterTestCase): def __init__(self, extra_args= None): self.setuptestcase() self.setup_parser() self.parser.add_argument("--euca-url",) self.parser.add_argument("--enterprise-url") self.parser.add_argument("--branch") if extra_args: for arg in extra_args: self.parser.add_argument(arg) self.get_args() # Setup basic eutester object self.tester = Eucaops( config_file=self.args.config_file, password=self.args.password) if not self.args.branch and not self.args.euca_url and not self.args.enterprise_url: self.args.branch = self.args.upgrade_to_branch machine = self.tester.get_component_machines("clc")[0] self.old_version = machine.sys("cat /etc/eucalyptus/eucalyptus-version")[0] ### IF we were passed a branch, fetch the correct repo urls from the repo API if self.args.branch: self.args.euca_url = self.get_repo_url("eucalyptus", self.args.branch) self.args.enterprise_url =self.get_repo_url("internal", self.args.branch) def clean_method(self): pass def get_repo_url(self, repo = "eucalyptus", branch = "testing"): import httplib api_host = "packages.release.eucalyptus-systems.com" machine = self.tester.get_component_machines("clc")[0] path="/api/1/genrepo/?distro="+str(machine.distro.name)+"&releasever=6&arch=x86_64&[email protected]:"+str(repo)+"&ref="+str(branch) + "&allow-old" conn=httplib.HTTPConnection(api_host) conn.request("GET", path) res=conn.getresponse() repo_url = res.read().strip() self.tester.debug("Setting " + repo + " URL to: " + repo_url) return repo_url def add_euca_repo(self): for machine in self.tester.config["machines"]: machine.add_repo(self.args.euca_url,"euca-upgrade") def add_enterprise_repo(self): for machine in self.tester.config["machines"]: machine.add_repo(self.args.enterprise_url, "ent-upgrade") def upgrade_packages(self): for machine in self.tester.config["machines"]: machine.upgrade() new_version = machine.sys("cat /etc/eucalyptus/eucalyptus-version")[0] if re.match( self.old_version, self.new_version): raise Exception("Version before (" + self.old_version +") and version after (" + new_version + ") are not the same") def start_components(self): for machine in self.tester.config["machines"]: if re.search("clc", " ".join(machine.components)) or re.search("ws", " ".join(machine.components)) or re.search("sc", " ".join(machine.components)): machine.sys("service eucalyptus-cloud start") if re.search("nc", " ".join(machine.components)): machine.sys("service eucalyptus-nc start") if re.search("cc", " ".join(machine.components)): machine.sys("service eucalyptus-cc start") def set_block_storage_manager(self): clc_service = Euservice("eucalyptus", self.tester) enabled_clc = self.tester.service_manager.wait_for_service(clc_service) for zone in self.tester.get_zones(): enabled_clc.machine.sys("source " + self.credpath + "/eucarc && euca-modify-property -p " + zone + "storage.blockstoragemanager=overlay") def UpgradeAll(self): self.add_euca_repo() if self.args.enterprise_url: self.add_enterprise_repo() self.upgrade_packages() self.start_components() if re.search("^3.1", self.old_version): self.set_block_storage_manager() if __name__ == "__main__": testcase = Upgrade() ### Either use the list of tests passed from config/command line to determine what subset of tests to run list = testcase.args.tests or [ "UpgradeAll"] ### Convert test suite methods to 
EutesterUnitTest objects unit_list = [ ] for test in list: unit_list.append( testcase.create_testunit_by_name(test) ) ### Run the EutesterUnitTest objects result = testcase.run_test_case_list(unit_list,clean_on_exit=True) exit(result)
Python
0.000001
@@ -2564,21 +2564,16 @@ ersion, -self. new_vers
639a692bc06cf31b5feb1d990740976884f88a0c
Fix key format (?)
testlog_etl/transforms/jscov_to_es.py
testlog_etl/transforms/jscov_to_es.py
# encoding: utf-8 # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Author: Trung Do ([email protected]) # from __future__ import division from __future__ import unicode_literals import json from pyLibrary.dot import Dict from pyLibrary.dot import wrap from pyLibrary.env import http from testlog_etl.transforms import EtlHeadGenerator from testlog_etl.transforms.pulse_block_to_es import scrub_pulse_record def process(source_key, source, destination, resources, please_stop=None): keys = [] records = [] etl_header_gen = EtlHeadGenerator(source_key) for i, line in enumerate(source.read_lines()): stats = Dict() pulse_record = scrub_pulse_record(source_key, i, line, stats) artifact_file_name = pulse_record.artifact.name # we're only interested in jscov files, at lease at the moment if "jscov" not in artifact_file_name: continue # construct the artifact's full url taskId = pulse_record.status.taskId runId = pulse_record.runId full_artifact_path = "https://public-artifacts.taskcluster.net/" + taskId + "/" + str(runId) + "/" + artifact_file_name # fetch the artifact response = http.get(full_artifact_path).all_content # transform json_data = wrap(json.loads(response)) for j, obj in enumerate(json_data): # get the test name. Just use the test file name at the moment # TODO: change this when needed test_name = obj.testUrl.split("/")[-1] for line in obj.covered: dest_key, dest_etl = etl_header_gen.next(pulse_record.etl, j) new_line = { "test": { "name": test_name, "url": obj.testUrl }, "source": { "sourceFile": obj.sourceFile, "lineCovered": line }, "etl": dest_etl } records.append({"id": dest_key, "value": new_line}) keys.append(dest_key) destination.extend(records) return keys
Python
0.00006
@@ -1799,16 +1799,66 @@ etl, j)%0A + key = dest_key + %22.%22 + unicode(j)%0A @@ -2262,21 +2262,16 @@ (%7B%22id%22: -dest_ key, %22va @@ -2315,21 +2315,16 @@ .append( -dest_ key)%0A%0A
d4f2fadd94603eea2c15f5bb8a2a7d29c0d39ed0
Hello David
CreateM3Us/CreateM3Us.py
CreateM3Us/CreateM3Us.py
import os incomingDirectory = 'C:\\temp' for subdir, dirs, files in os.walk(incomingDirectory): for file in files: #print os.path.join(subdir, file) filepath = subdir + os.sep + file print (filepath) # File input/output # https://www.digitalocean.com/community/tutorials/how-to-handle-plain-text-files-in-python-3 fileA = open(incomingDirectory + '/something.txt', 'w') fileA.write("Some text") fileA.close() # Desired Output/Structure """ C:\temp\GenreA\GenreA.m3u C:\temp\GenreA\Artist1\Artist1.m3u C:\temp\GenreA\Artist1\AlbumA\FileA.txt C:\temp\GenreA\Artist1\AlbumA\FileB.txt C:\temp\GenreA\Artist1\AlbumB\FileA.txt C:\temp\GenreA\Artist1\AlbumB\FileB.txt C:\temp\GenreA\Artist1\AlbumB\FileC.txt C:\temp\GenreA\Artist2\Artist2.m3u C:\temp\GenreA\Artist2\AlbumA\FileA.txt C:\temp\GenreA\Artist2\AlbumA\FileB.txt C:\temp\GenreA\Artist2\AlbumB\FileA.txt C:\temp\GenreA\Artist2\AlbumB\FileB.txt C:\temp\GenreA\Artist2\AlbumB\FileC.txt """ # M3U file (C:\temp\GenreA\GenreA.m3u) """ Artist1/AlbumA/FileA.txt Artist1/AlbumA/FileB.txt Artist1/AlbumB/FileA.txt Artist1/AlbumB/FileB.txt Artist1/AlbumB/FileC.txt Artist2/... """ #M3U file (C:\temp\GenreA\Artist1\Artist1.m3u) """ AlbumA/FileA.txt AlbumA/FileB.txt AlbumB/FileA.txt AlbumB/FileB.txt AlbumB/FileC.txt """
Python
0.999971
@@ -94,16 +94,38 @@ ectory): + #What does is.walk do %0D%0A fo
62bb2ed7c32fe7af668584ea4d5845cedb923d13
Add with-items use case to mistral integration tests
st2tests/integration/mistral/test_wiring.py
st2tests/integration/mistral/test_wiring.py
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet import unittest2 import multiprocessing from st2client import client as st2 from st2client import models class TestWorkflowExecution(unittest2.TestCase): @classmethod def setUpClass(cls): cls.st2client = st2.Client(base_url='http://localhost') def test_cpu_count(self): # Ensure tests are run on multi-processor system to catch race conditions self.assertGreaterEqual(multiprocessing.cpu_count(), 2) def _execute_workflow(self, action, parameters): execution = models.LiveAction(action=action, parameters=parameters) execution = self.st2client.liveactions.create(execution) self.assertIsNotNone(execution.id) self.assertEqual(execution.action['ref'], action) self.assertIn(execution.status, ['scheduled', 'running']) return execution def _wait_for_completion(self, execution, wait=300): for i in range(wait): eventlet.sleep(1) execution = self.st2client.liveactions.get_by_id(execution.id) if execution.status in ['succeeded', 'failed']: break return execution def _assert_success(self, execution): self.assertEqual(execution.status, 'succeeded') tasks = execution.result['tasks'] for task in tasks: self.assertIn('state', task) self.assertEqual(task['state'], 'SUCCESS') def _assert_failure(self, execution): self.assertEqual(execution.status, 'failed') tasks = execution.result['tasks'] for task in tasks: self.assertIn('state', task) self.assertEqual(task['state'], 'ERROR') def test_basic_workflow(self): execution = self._execute_workflow('examples.mistral-basic', {'cmd': 'date'}) execution = self._wait_for_completion(execution) self._assert_success(execution) self.assertIn('stdout', execution.result) def test_basic_workbook(self): execution = self._execute_workflow('examples.mistral-workbook-basic', {'cmd': 'date'}) execution = self._wait_for_completion(execution) self._assert_success(execution) self.assertIn('stdout', execution.result) def test_complex_workbook(self): execution = self._execute_workflow( 'examples.mistral-workbook-complex', {'vm_name': 'demo1'}) execution = self._wait_for_completion(execution) self._assert_success(execution) self.assertIn('vm_id', execution.result) self.assertIn('vm_state', execution.result) def test_complex_workbook_subflow_actions(self): execution = self._execute_workflow( 'examples.mistral-workbook-subflows', {'subject': 'st2', 'adjective': 'cool'}) execution = self._wait_for_completion(execution) self._assert_success(execution) self.assertIn('tagline', execution.result) self.assertEqual(execution.result['tagline'], 'st2 is cool!') def test_concurrent_load(self): wf_name = 'examples.mistral-workbook-complex' wf_params = {'vm_name': 'demo1'} executions = [self._execute_workflow(wf_name, wf_params) for i in range(20)] eventlet.sleep(10) for execution in executions: execution = 
self._wait_for_completion(execution) self._assert_success(execution) self.assertIn('vm_id', execution.result) self.assertIn('vm_state', execution.result) def test_execution_failure(self): execution = self._execute_workflow('examples.mistral-basic', {'cmd': 'foo'}) execution = self._wait_for_completion(execution) self._assert_failure(execution)
Python
0
@@ -3742,24 +3742,351 @@ is cool!')%0A%0A + def test_with_items(self):%0A params = %7B'cmd': 'date', 'count': 8%7D%0A execution = self._execute_workflow('examples.mistral-repeat', params)%0A execution = self._wait_for_completion(execution)%0A self._assert_success(execution)%0A self.assertEqual(len(execution.result%5B'result'%5D), params%5B'count'%5D)%0A%0A def test
7050f13a5cda372d3fc003991981b04700114f52
Add volume cleanup to test_volume_transfer test
tempest/api/volume/test_volume_transfers.py
tempest/api/volume/test_volume_transfers.py
# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.volume import base from tempest import clients from tempest.test import attr class VolumesTransfersTest(base.BaseVolumeV1Test): _interface = "json" @classmethod def setUpClass(cls): super(VolumesTransfersTest, cls).setUpClass() # Add another tenant to test volume-transfer if cls.config.compute.allow_tenant_isolation: creds = cls.isolated_creds.get_alt_creds() username, tenant_name, password = creds cls.os_alt = clients.Manager(username=username, password=password, tenant_name=tenant_name, interface=cls._interface) cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id'] # Add admin tenant to cleanup resources adm_creds = cls.isolated_creds.get_admin_creds() admin_username, admin_tenant_name, admin_password = adm_creds cls.os_adm = clients.Manager(username=admin_username, password=admin_password, tenant_name=admin_tenant_name, interface=cls._interface) else: cls.os_alt = clients.AltManager() alt_tenant_name = cls.os_alt.tenant_name identity_client = cls._get_identity_admin_client() _, tenants = identity_client.list_tenants() cls.alt_tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] == alt_tenant_name][0] cls.os_adm = clients.ComputeAdminManager(interface=cls._interface) cls.client = cls.volumes_client cls.alt_client = cls.os_alt.volumes_client cls.adm_client = cls.os_adm.volumes_client @attr(type='gate') def test_create_get_list_accept_volume_transfer(self): # Create a volume first volume = self.create_volume() # Create a volume transfer resp, transfer = self.client.create_volume_transfer(volume['id']) self.assertEqual(202, resp.status) transfer_id = transfer['id'] auth_key = transfer['auth_key'] self.client.wait_for_volume_status(volume['id'], 'awaiting-transfer') # Get a volume transfer resp, body = self.client.get_volume_transfer(transfer_id) self.assertEqual(200, resp.status) self.assertEqual(volume['id'], body['volume_id']) # List volume transfers, the result should be greater than # or equal to 1 resp, body = self.client.list_volume_transfers() self.assertEqual(200, resp.status) self.assertGreaterEqual(len(body), 1) # Accept a volume transfer by alt_tenant resp, body = self.alt_client.accept_volume_transfer(transfer_id, auth_key) self.assertEqual(202, resp.status) self.alt_client.wait_for_volume_status(volume['id'], 'available') def test_create_list_delete_volume_transfer(self): # Create a volume first volume = self.create_volume() # Create a volume transfer resp, body = self.client.create_volume_transfer(volume['id']) self.assertEqual(202, resp.status) transfer_id = body['id'] self.client.wait_for_volume_status(volume['id'], 'awaiting-transfer') # List all volume transfers, there's only one in this test resp, body = self.client.list_volume_transfers() self.assertEqual(200, resp.status) self.assertEqual(volume['id'], body[0]['volume_id']) # Delete a volume transfer resp, body = self.client.delete_volume_transfer(transfer_id) self.assertEqual(202, resp.status) 
self.client.wait_for_volume_status(volume['id'], 'available') class VolumesTransfersTestXML(VolumesTransfersTest): _interface = "xml"
Python
0.000005
@@ -2486,16 +2486,278 @@ client%0A%0A + def _delete_volume(self, volume_id):%0A # Delete the specified volume using admin creds%0A resp, _ = self.adm_client.delete_volume(volume_id)%0A self.assertEqual(202, resp.status)%0A self.adm_client.wait_for_resource_deletion(volume_id)%0A%0A @att @@ -2891,32 +2891,91 @@ .create_volume() +%0A self.addCleanup(self._delete_volume, volume%5B'id'%5D) %0A%0A # Crea @@ -4183,16 +4183,75 @@ volume() +%0A self.addCleanup(self._delete_volume, volume%5B'id'%5D) %0A%0A
4e58bcfe15144af1cd6dfde166a02291a9a3a413
add run_module
tests/chainer_tests/functions_tests/normalization_tests/test_layer_normalization.py
tests/chainer_tests/functions_tests/normalization_tests/test_layer_normalization.py
import unittest import numpy import chainer from chainer import cuda from chainer import functions from chainer import gradient_check from chainer import testing from chainer.testing import attr from chainer.testing import condition def _batch_normalization(expander, gamma, beta, x, mean, var): mean = mean[expander] std = numpy.sqrt(var)[expander] y_expect = (gamma[expander] * (x - mean) / std + beta[expander]) return y_expect @testing.parameterize(*(testing.product({ 'batchsize': [1, 5], 'size': [10, 20], 'dtype': [numpy.float32], }))) class TestBatchNormalization(unittest.TestCase): def setUp(self): shape = (self.batchsize, self.size) size = (numpy.prod(shape) // shape[0],) x = numpy.random.uniform(-1, 1, shape).astype(self.dtype) gamma = numpy.random.uniform(-1, 1, size).astype(self.dtype) beta = numpy.random.uniform(-1, 1, size).astype(self.dtype) self.args = [x, gamma, beta] self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype) self.ggx = [numpy.random.uniform(-1, 1, _.shape).astype(_.dtype) for _ in self.args] self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3} self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3} if self.dtype == numpy.float16: self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2} self.check_backward_options = {'atol': 5e-1, 'rtol': 1e-1} def check_forward(self, args): x_data = args[0] def func(x): args_ = x, args[1], args[2] return functions.LayerNormalization().apply(args_)[0] y = func(x_data) self.assertEqual(y.data.dtype, self.dtype) unbatched_concat_y = chainer.functions.concat( [func(one_x[None, ]) for one_x in x_data], axis=0) testing.assert_allclose( y.data, unbatched_concat_y.data, **self.check_forward_options) @condition.retry(3) def test_forward_cpu(self): self.check_forward(self.args) @attr.gpu @condition.retry(3) def test_forward_gpu(self): self.check_forward( [cuda.to_gpu(_) for _ in self.args], cuda.to_gpu(self.gy)) def check_backward(self, args, y_grad): def func(*args_): return functions.LayerNormalization().apply(args_) gradient_check.check_backward( func, args, y_grad, eps=1e-2, **self.check_backward_options) @condition.retry(3) def test_backward_cpu(self): self.check_backward(self.args, self.gy) @attr.gpu @condition.retry(3) def test_backward_gpu(self): self.check_backward( [cuda.to_gpu(_) for _ in self.args], cuda.to_gpu(self.gy)) def check_double_backward(self, args, y_grad, x_grad_grad): def func(*args_): y = functions.LayerNormalization().apply(args_)[0] return y * y gradient_check.check_double_backward( func, args, y_grad, x_grad_grad, eps=1e-2, **self.check_backward_options) @condition.retry(3) def test_double_backward_cpu(self): self.check_double_backward(self.args, self.gy, self.ggx) @attr.gpu @condition.retry(3) def test_double_backward_gpu(self): self.check_double_backward( [cuda.to_gpu(_) for _ in self.args], cuda.to_gpu(self.gy), [cuda.to_gpu(_) for _ in self.ggx])
Python
0.000001
@@ -3481,8 +3481,49 @@ f.ggx%5D)%0A +%0A%0Atesting.run_module(__name__, __file__)%0A
0f0cfe01323e92b12a54da8aa71ca0e3951956df
version increment
SAGA/__init__.py
SAGA/__init__.py
""" This is the top directory of the SAGA package """ from .database import Database from .hosts import HostCatalog from .objects import ObjectCatalog, ObjectCuts from .targets import TargetSelection __version__ = '0.5.2'
Python
0.000002
@@ -218,7 +218,7 @@ 0.5. -2 +3 '%0A
c9e70018bda0df294c55deea2378eab07566118b
update network
SCNIC/general.py
SCNIC/general.py
from __future__ import division import re import numpy as np import networkx as nx from biom.table import Table from datetime import datetime from collections import OrderedDict from numpy.random import multivariate_normal from statsmodels.sandbox.stats.multicomp import multipletests __author__ = 'shafferm' """functions used widely""" class Logger(OrderedDict): """""" def __init__(self, output): super(Logger, self).__init__() self.output_file = output self['start time'] = datetime.now() def output_log(self): with open(self.output_file, 'w') as f: self['finish time'] = datetime.now() self['elapsed time'] = self['finish time'] - self['start time'] for key, value in self.items(): f.write(key + ': ' + str(value) + '\n') def p_adjust(pvalues, method='fdr_bh'): res = multipletests(pvalues, method=method) return np.array(res[1], dtype=float) def sparcc_paper_filter(table): """if a observation averages more than 2 reads per sample then keep, if a sample has more than 500 reads then keep""" table = table.copy() table.filter(table.ids(axis='sample')[table.sum(axis='sample') > 500], axis='sample') table.filter(table.ids(axis='observation')[table.sum(axis='observation') / table.shape[1] >= 2], axis="observation") return table def df_to_biom(df): return Table(np.transpose(df.values), [str(i) for i in df.columns], [str(i) for i in df.index]) def get_metadata_from_table(table, axis='observation'): metadata = dict() for _, otu_i, metadata_i in table.iter(axis=axis): if metadata_i is not None: metadata[str(otu_i)] = metadata_i return metadata def underscore_to_camelcase(str_): str_ = re.split('[-_]', str_) if len(str_) > 1: str_ = [str_[0]] + [i.capitalize() for i in str_[1:]] return ''.join(str_) def filter_correls(correls, min_p=None, min_r=None, conet=False): """correls is a pandas dataframe with a multiindex containing the correlated pair of features, r and optionally p and p_adj and any others""" # TODO: allow non r column names # TODO: allow non p_adj column names if conet: correls = correls[correls.r > 0] if min_p is not None: # filter to only include significant correlations if 'p_adj' in correls.columns: correls = correls[correls.p_adj < min_p] elif 'p' in correls.columns: correls = correls[correls.p < min_p] else: raise ValueError("No p or p_adj in correls") if min_r is not None: correls = correls[np.abs(correls.r) > min_r] return correls def correls_to_net(correls, metadata=None): if metadata is None: metadata = {} graph = nx.Graph() for otu_pair, correl in correls.iterrows(): for otu in otu_pair: if otu not in graph.node: graph.add_node(otu) if otu in metadata: for key in metadata[otu]: graph_key = underscore_to_camelcase(str(key)) if metadata[otu][key] is None: continue elif type(metadata[otu][key]) == str: graph.nodes[otu][graph_key] = metadata[otu][key] elif hasattr(metadata[otu][key], '__iter__'): graph.nodes[otu][graph_key] = ';'.join(metadata[otu][key]) else: graph.nodes[otu][graph_key] = metadata[otu][key] graph.add_edge(*otu_pair) for i in correl.index: graph_key = underscore_to_camelcase(str(i)) graph.edges[otu_pair][graph_key] = correl[i] return graph def filter_table(table, min_samples): """filter relative abundance table, by default throw away things greater than 1/3 zeros""" table = table.copy() # first sample filter to_keep = [i for i in table.ids(axis='observation') if sum(table.data(i, axis='observation') != 0) >= min_samples] table.filter(to_keep, axis='observation') return table def simulate_correls(corr_stren=(.99, .99), std=(1, 1, 1, 2, 2), means=(100, 100, 100, 100, 100), 
size=30, noncors=10, noncors_mean=100, noncors_std=100): """ Generates a correlation matrix with diagonal of stds based on input parameters and fills rest of matrix with uncorrelated values all with same mean and standard deviations. Output should have a triangle of correlated observations and a pair all other observations should be uncorrelated. Correlation to covariance calculated by cor(X,Y)=cov(X,Y)/sd(X)sd(Y). Parameters ---------- corr_stren: tuple of length 2, correlations in triangle and in pair std: tuple of length 5, standard deviations of each observation means: tuple of length 5, mean of each observation size: number of samples to generate from the multivariate normal distribution noncors: number of uncorrelated values noncors_mean: mean of uncorrelated values noncors_std: standard deviation of uncorrelated values Returns ------- table: a biom table with (size) samples and (5+noncors) observations """ cor = [[std[0], corr_stren[0], corr_stren[0], 0., 0.], # define the correlation matrix for the triangle and pair [corr_stren[0], std[1], corr_stren[0], 0., 0.], [corr_stren[0], corr_stren[0], std[2], 0., 0.], [0., 0., 0., std[3], corr_stren[1]], [0., 0., 0., corr_stren[1], std[4]]] cor = np.array(cor) cov = np.zeros(np.array(cor.shape) + noncors) # generate empty covariance matrix to be filled for i in range(cor.shape[0]): # fill in all but diagonal of covariance matrix, first 5 for j in range(i + 1, cor.shape[0]): curr_cov = cor[i, j] * cor[i, i] * cor[j, j] cov[i, j] = curr_cov cov[j, i] = curr_cov for i in range(cor.shape[0]): # fill diagonal of covariance matrix, first 5 cov[i, i] = np.square(cor[i, i]) means = list(means) for i in range(cor.shape[0], cov.shape[0]): # fill diagonal of covariance, 6 to end and populate mean list cov[i, i] = noncors_std means.append(noncors_mean) # fill the count table counts = multivariate_normal(means, cov, size).T counts = np.round(counts) observ_ids = ["Observ_" + str(i) for i in range(cov.shape[0])] sample_ids = ["Sample_" + str(i) for i in range(size)] table = Table(counts, observ_ids, sample_ids) return table
Python
0.000001
@@ -2920,16 +2920,17 @@ aph.node +s :%0A
0f1551db96cd27ed20e62545cac1540a405e8f1a
fix bug
FlaskWebProject/views.py
FlaskWebProject/views.py
""" Routes and views for the flask application. """ import os from datetime import datetime from flask import render_template, request from FlaskWebProject import app from generate_summary_json import generate_summary_json @app.route('/') @app.route('/home') def home(): """Renders the home page.""" return render_template( 'index.html', title='Home Page', year=datetime.now().year, ) @app.route('/summarize', methods=['GET']) def summarize(): access_token = os.getenv('TREEHACKS_SLACK_ACCESS_TOKEN') member_id = request.args.get('user_id') channel_id = request.args.get('channel_id') channel_name = request.args.get('channel_name') num_messages = request.args.get('text') summary_json = generate_summary_json(member_id, channel_id, channel_name, num_messages, TEST_TEAM_SLACK_ACCESS_TOKEN) return {'text': channel_name, 'private': True} if __name__ == '__main__': app.run(debug=True)
Python
0.000001
@@ -513,16 +513,16 @@ v('T -REEHACKS +EST_TEAM _SLA @@ -823,36 +823,20 @@ es, -TEST_TEAM_SLACK_ACCESS_TOKEN +access_token )%0A
2dc6cfb97a579c5560c40e9311b53603da4de962
Improve variable name in ChessMatch update match embed method
Discord/modules/chess.py
Discord/modules/chess.py
import discord # import asyncio import datetime import subprocess import chess import chess.engine import chess.pgn import chess.svg from wand.image import Image class ChessMatch(chess.Board): @classmethod async def start(cls, ctx, white_player, black_player): self = cls() self.ctx = ctx self.white_player = white_player self.black_player = black_player self.bot = ctx.bot # TODO: Dynamically load chess engine not locked to version? self.engine_transport, self.chess_engine = await chess.engine.popen_uci("bin/stockfish_10_x64.exe", creationflags = subprocess.CREATE_NO_WINDOW) # TODO: Use popcnt.exe? self.match_message = None self.task = ctx.bot.loop.create_task(self.match_task()) return self # TODO: Cancel task on deletion/bot shutdown def make_move(self, move): try: self.push_san(move) except ValueError: try: self.push_uci(move) except ValueError: return False return True def valid_move(self, move): try: self.parse_san(move) except ValueError: try: self.parse_uci(move) except ValueError: return False return True async def match_task(self): self.match_message = await self.ctx.embed_send("Loading..") await self.update_match_embed() while True: player = [self.black_player, self.white_player][int(self.turn)] embed = self.match_message.embeds[0] if player == self.bot.user: await self.match_message.edit(embed = embed.set_footer(text = "I'm thinking..")) result = await self.chess_engine.play(self, chess.engine.Limit(time = 2)) self.push(result.move) await self.update_match_embed(footer_text = f"I moved {result.move}") else: message = await self.bot.wait_for("message", check = lambda msg: msg.author == player and msg.channel == self.ctx.channel and self.valid_move(msg.content)) await self.match_message.edit(embed = embed.set_footer(text = "Processing move..")) self.make_move(message.content) if self.is_game_over(): footer_text = discord.Embed.Empty else: footer_text = f"It is {['black', 'white'][int(self.turn)]}'s ({[self.black_player, self.white_player][int(self.turn)]}'s) turn to move" await self.update_match_embed(footer_text = footer_text) await self.bot.attempt_delete_message(message) async def update_match_embed(self, *, flipped = None, footer_text = discord.Embed.Empty): if flipped is None: flipped = not self.turn if self.move_stack: lastmove = self.peek() else: lastmove = None if self.is_check(): check = self.king(self.turn) else: check = None # svg = self._repr_svg_() svg = chess.svg.board(self, lastmove = lastmove, check = check, flipped = flipped) svg = svg.replace("y=\"390\"", "y=\"395\"") with open(self.bot.data_path + "/temp/chess_board.svg", 'w') as image: print(svg, file = image) with Image(filename = self.bot.data_path + "/temp/chess_board.svg") as img: img.format = "png" img.save(filename = self.bot.data_path + "/temp/chess_board.png") # asyncio.sleep(0.2) # necessary?, wasn't even awaited if self.match_message: embed = self.match_message.embeds[0] else: embed = discord.Embed(color = self.bot.bot_color) chess_pgn = chess.pgn.Game.from_board(self) chess_pgn.headers["Site"] = "Discord" chess_pgn.headers["Date"] = datetime.datetime.utcnow().strftime("%Y.%m.%d") chess_pgn.headers["White"] = self.white_player.mention chess_pgn.headers["Black"] = self.black_player.mention embed.description = str(chess_pgn) # TODO: Upload into embed + delete and re-send to update? 
''' embed.set_image(url = self.bot.imgur_client.upload_from_path(self.bot.data_path + "/temp/chess_board.png")["link"]) embed.set_image(url = data["data"]["img_url"]) ''' image_message = await self.bot.cache_channel.send(file = discord.File(self.bot.data_path + "/temp/chess_board.png")) embed.set_image(url = image_message.attachments[0].url) embed.set_footer(text = footer_text) if not self.match_message: self.match_message = await self.ctx.send(embed = embed) else: await self.match_message.edit(embed = embed) async def new_match_embed(self, *, flipped = None, footer_text = None): if flipped is None: flipped = not self.turn if footer_text is None: if self.is_game_over(): footer_text = discord.Embed.Empty else: footer_text = f"It's {['black', 'white'][int(self.turn)]}'s ({[self.black_player, self.white_player][int(self.turn)]}'s) turn to move" if self.match_message: await self.match_message.delete() self.match_message = None await self.update_match_embed(flipped = flipped, footer_text = footer_text)
Python
0
@@ -2977,17 +2977,21 @@ s im -g +age :%0A%09%09%09im -g +age .for @@ -3007,17 +3007,19 @@ g%22%0A%09%09%09im -g +age .save(fi
529987bb17a05c041cdbf3bbe2a98edda72872fc
remove unneeded Todo
InvenTree/plugin/urls.py
InvenTree/plugin/urls.py
""" URL lookup for plugin app """ from django.conf.urls import url, include from plugin import plugin_reg PLUGIN_BASE = 'plugin' # Constant for links def get_plugin_urls(): """returns a urlpattern that can be integrated into the global urls""" urls = [] for plugin in plugin_reg.plugins.values(): if plugin.mixin_enabled('urls'): urls.append(plugin.urlpatterns) # TODO wrap everything in plugin_url_wrapper return url(f'^{PLUGIN_BASE}/', include((urls, 'plugin')))
Python
0.000036
@@ -397,57 +397,8 @@ ns)%0A - # TODO wrap everything in plugin_url_wrapper%0A
6137a6f00abbeb81b080f534481bb255f950dd83
access oauth token securely through azure
FlaskWebProject/views.py
FlaskWebProject/views.py
""" Routes and views for the Flask application. """ from flask import render_template, request from FlaskWebProject import app from oauth_constants import TEST_TEAM_SLACK_ACCESS_TOKEN from generate_summary_json import generate_summary_json global TEST_TEAM_SLACK_ACCESS_TOKEN @app.route('/') @app.route('/home') def home(): """Renders the home page.""" return render_template( 'index.html', title='Home Page' ) # text is number of messages @app.route('/summarize', methods=['GET']) def summarize(): member_id = requests.args.get('user_id') channel_id = requests.args.get('channel_id') channel_name = requests.args.get('channel_name') num_messages = requests.args.get('text') summary_json = generate_summary_json(member_id, channel_id, channel_name, num_messages, TEST_TEAM_SLACK_ACCESS_TOKEN) return {'text': channel_name, 'private': True} if __name__ == '__main__': app.run(debug=True)
Python
0
@@ -44,16 +44,26 @@ ion.%0A%22%22%22 +%0Aimport os %0A%0Afrom f @@ -136,66 +136,8 @@ pp%0A%0A -from oauth_constants import TEST_TEAM_SLACK_ACCESS_TOKEN %0A from @@ -193,24 +193,44 @@ on%0A%0A -global TEST_TEAM +%0AACCESS_TOKEN = os.getenv('TREEHACKS _SLA @@ -244,16 +244,18 @@ SS_TOKEN +') %0A%0A%0A@app. @@ -499,16 +499,28 @@ mmarize( +ACCESS_TOKEN ):%0A m
cd5f824a2d756c8770be6f47d946c7e39c85228e
Fix postcode importing all
molly/apps/places/providers/postcodes.py
molly/apps/places/providers/postcodes.py
import simplejson, urllib, random, csv, zipfile, tempfile, urllib2, os.path from django.contrib.gis.geos import Point from molly.apps.places.providers import BaseMapsProvider from molly.apps.places.models import Entity, EntityType, Source from molly.conf.settings import batch class PostcodesMapsProvider(BaseMapsProvider): def __init__(self, codepoint_path, import_areas=None): self.codepoint_path = codepoint_path self.import_areas = import_areas @batch('%d 12 1 1 *' % random.randint(0, 59)) def import_data(self, metadata, output): entity_type, source = self._get_entity_type(), self._get_source() if not os.path.exists(self.codepoint_path): archive_url = urllib2.urlopen('http://freepostcodes.org.uk/static/code-point-open/codepo_gb.zip') archive_file = open(self.codepoint_path, 'w') archive_file.write(archive_url.read()) archive_file.close() archive = zipfile.ZipFile(self.codepoint_path) if self.import_areas: filenames = ['Code-Point Open/data/CSV/%s.csv' % code.lower() for code in self.import_areas] else: filenames = [path for path in archive.listnames() if re.match(r'Code\-Point Open\/data\/CSV\/[a-z]{1,2}.csv', path)] for filename in filenames: if hasattr(archive, 'open'): f = archive.open(filename) else: f = tempfile.TemporaryFile() f.write(archive.read(filename)) f.seek(0) reader = csv.reader(f) self._load_from_csv(reader, entity_type, source) del f def _load_from_csv(self, reader, entity_type, source): j = 0 for i, line in enumerate(reader): postcode_abbrev, (easting, northing) = line[0], line[10:12] if postcode_abbrev[-4] != ' ': postcode = '%s %s' % (postcode_abbrev[:-3], postcode_abbrev[-3:]) else: postcode = postcode_abbrev postcode_abbrev = postcode_abbrev.replace(' ', '') try: easting, northing = int(easting), int(northing) except ValueError: continue j += 1 try: entity = Entity.objects.get(source=source, _identifiers__scheme='postcode', _identifiers__value=postcode_abbrev) except Entity.DoesNotExist: entity = Entity(source=source) entity.title = postcode entity.location = Point(easting, northing, srid=27700) entity.geometry = entity.location entity.primary_type = entity_type identifiers = { 'postcode': postcode_abbrev, 'postcode-canonical': postcode, } entity.save(identifiers=identifiers) entity.all_types.add(entity_type) entity.update_all_types_completion() def _get_entity_type(self): try: return EntityType.objects.get(slug='post-code') except EntityType.DoesNotExist: entity_type = EntityType( slug = 'post-code', article = 'a', verbose_name = 'postcode', verbose_name_plural = 'postcodes', show_in_nearby_list = False, show_in_category_list = False, ) entity_type.save() return entity_type def _get_source(self): try: source = Source.objects.get(module_name="molly.providers.apps.maps.postcodes") except Source.DoesNotExist: source = Source(module_name="molly.providers.apps.maps.postcodes") source.name = "Postcodes" source.save() return source
Python
0
@@ -1218,17 +1218,16 @@ ive. +name list -names () i
e5cc051bc7be854e253853d85b1de8b3037170be
always convert to floats
nbgrader/preprocessors/overwritecells.py
nbgrader/preprocessors/overwritecells.py
from IPython.nbformat.v4.nbbase import validate from nbgrader import utils from nbgrader.api import Gradebook from nbgrader.preprocessors import NbGraderPreprocessor class OverwriteCells(NbGraderPreprocessor): """A preprocessor to overwrite information about grade and solution cells.""" def preprocess(self, nb, resources): # pull information from the resources self.notebook_id = resources['nbgrader']['notebook'] self.assignment_id = resources['nbgrader']['assignment'] self.db_url = resources['nbgrader']['db_url'] # connect to the database self.gradebook = Gradebook(self.db_url) nb, resources = super(OverwriteCells, self).preprocess(nb, resources) return nb, resources def update_cell_type(self, cell, cell_type): if cell.cell_type == cell_type: return elif cell_type == 'code': cell.cell_type = 'code' cell.outputs = [] cell.execution_count = None validate(cell, 'code_cell') elif cell_type == 'markdown': cell.cell_type = 'markdown' if 'outputs' in cell: del cell['outputs'] if 'execution_count' in cell: del cell['execution_count'] validate(cell, 'markdown_cell') def report_change(self, name, attr, old, new): self.log.warning( "Attribute '%s' for cell %s has changed! (should be: %s, got: %s)", attr, name, old, new) def preprocess_cell(self, cell, resources, cell_index): grade_id = cell.metadata.get('nbgrader', {}).get('grade_id', None) if grade_id is None: return cell, resources source_cell = self.gradebook.find_source_cell( grade_id, self.notebook_id, self.assignment_id) # check that the cell type hasn't changed if cell.cell_type != source_cell.cell_type: self.report_change(grade_id, "cell_type", source_cell.cell_type, cell.cell_type) self.update_cell_type(cell, source_cell.cell_type) # check that the locked status hasn't changed if utils.is_locked(cell) != source_cell.locked: self.report_change(grade_id, "locked", source_cell.locked, utils.is_locked(cell)) cell.metadata.nbgrader["locked"] = source_cell.locked # if it's a grade cell, check that the max score hasn't changed if utils.is_grade(cell): grade_cell = self.gradebook.find_grade_cell( grade_id, self.notebook_id, self.assignment_id) old_points = grade_cell.max_score new_points = cell.metadata.nbgrader["points"] if type(old_points)!=type(new_points): new_points=float(new_points) old_points=float(old_points) if old_points != new_points: self.report_change(grade_id, "points", old_points, new_points) cell.metadata.nbgrader["points"] = old_points # always update the checksum, just in case cell.metadata.nbgrader["checksum"] = source_cell.checksum # if it's locked, check that the checksum hasn't changed if source_cell.locked: old_checksum = source_cell.checksum new_checksum = utils.compute_checksum(cell) if old_checksum != new_checksum: self.report_change(grade_id, "checksum", old_checksum, new_checksum) cell.source = source_cell.source # double check the the checksum is correct now if utils.compute_checksum(cell) != source_cell.checksum: raise RuntimeError("Inconsistent checksums for cell {}".format(source_cell.name)) return cell, resources
Python
0.999827
@@ -2648,16 +2648,22 @@ oints = +float( grade_ce @@ -2674,16 +2674,17 @@ ax_score +) %0A @@ -2701,16 +2701,22 @@ oints = +float( cell.met @@ -2743,113 +2743,8 @@ ts%22%5D -%0A%09 if type(old_points)!=type(new_points):%0A%09%09new_points=float(new_points)%0A%09%09old_points=float(old_points )%0A%0A
cecbbd6de3b2d9ac63c24cb883780fe9c56e23f5
Fix required param
openquake/hazardlib/gsim/can15/sinter.py
openquake/hazardlib/gsim/can15/sinter.py
""" :module:`openquake.hazardlib.gsim.sinter` implements :class:`SInterCan15Mid`, :class:`SInterCan15Upp`, :class:`SInterCan15Low` """ import numpy as np from openquake.hazardlib.gsim.can15.western import get_sigma from openquake.hazardlib.gsim.base import CoeffsTable from openquake.hazardlib.gsim.zhao_2006 import ZhaoEtAl2006SInter from openquake.hazardlib.gsim.atkinson_macias_2009 import AtkinsonMacias2009 from openquake.hazardlib.gsim.abrahamson_2015 import AbrahamsonEtAl2015SInter from openquake.hazardlib.gsim.ghofrani_atkinson_2014 import \ GhofraniAtkinson2014 class SInterCan15Mid(ZhaoEtAl2006SInter): """ """ def _get_delta(dists): """ Computes the additional delta to be used for the computation of the upp and low models """ delta = np.zeros_like(dists.rrup) delta = np.min([(0.15-0.0007*dists.rrup), 0.35]) return delta def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): mean, stds = self._get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) def _get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ """ # Zhao et al. 2006 - Vs30 + Rrup mean, stds1 = super().get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) cff = self.SITE_COEFFS[imt] mean_zh06 = mean + np.log(cff['mf']) # Atkinson and Macias (2009) - Rrup gmpe = AtkinsonMacias2009() mean_am09, stds2 = gmpe.get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) mean_am09 += np.log(cff['mf']) # Abrahamson et al. (2015) - Rrup + vs30 + backarc gmpe = AbrahamsonEtAl2015SInter() mean_ab15, stds3 = gmpe.get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) # Ghofrani and Atkinson (2014) - Rrup + vs30 gmpe = GhofraniAtkinson2014() mean_ga14, stds4 = gmpe.get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) mean_ga14 += np.log(cff['mf']) # Computing adjusted mean and stds mean_adj = np.log(np.exp(mean_zh06)*0.1 + np.exp(mean_am09)*0.5 + np.exp(mean_ab15)*0.2 + np.exp(mean_ga14)*0.2) # note that in this case we do not apply a triangular smoothing on # distance as explained at page 996 stds_adj = np.log(np.exp(stds1) + np.exp(stds2) + np.exp(stds3) + np.exp(stds4)) return mean_adj, stds_adj SITE_COEFFS = CoeffsTable(sa_damping=5, table="""\ IMT mf pgv 1.000 pga 0.500 0.040 0.440 0.100 0.440 0.200 0.600 0.300 0.810 0.400 1.000 1.000 1.040 2.000 1.510 3.000 1.200 5.000 1.100 10.00 1.000 """) class SInterCan15Low(SInterCan15Mid): def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): mean, stds = self._get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) mean -= self._get_delta(dists) class SInterCan15Upp(SInterCan15Mid): def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): mean, stds = self._get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) mean += self._get_delta(dists) stddevs = [np.ones(len(dists.rjb))*get_sigma(imt)] return mean, stddevs
Python
0.000419
@@ -633,16 +633,75 @@ %22%22%22%0A%0A + REQUIRES_SITES_PARAMETERS = set(('vs30', 'backarc',))%0A%0A def
6565e5bd88ebe5fde8d65664041a9e8f571ca7d7
switch to requests
IMGURdl/downloadIMGUR.py
IMGURdl/downloadIMGUR.py
# example from: # https://www.toptal.com/python/beginners-guide-to-concurrency-and-parallelism-in-python import json import logging import os from pathlib import Path from urllib.request import urlopen, Request # import requests logger = logging.getLogger(__name__) def get_links(client_id): headers = {'Authorization': 'Client-ID {}'.format(client_id)} url = 'https://api.imgur.com/3/gallery/random/random/' resp = requests.get(url, headers=headers) resp.raise_for_status() data = resp.json() # req = Request('https://api.imgur.com/3/gallery/random/random/', headers=headers, method='GET') # with urlopen(req) as resp: # data = json.loads(resp.read().decode('utf-8')) return map(lambda item: item['link'], data['data']) def download_link(directory, link): logger.info('Downloading %s', link) download_path = directory / os.path.basename(link) with urlopen(link) as image, download_path.open('wb') as f: f.write(image.read()) def setup_download_dir(): download_dir = Path('images') if not download_dir.exists(): download_dir.mkdir() return download_dir
Python
0.000001
@@ -205,18 +205,16 @@ Request%0A -# import r
59b3d8b5bce596583f5901f1b3b79a883b7b8e55
Fix stocktake export
InvenTree/stock/admin.py
InvenTree/stock/admin.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from import_export.admin import ImportExportModelAdmin from import_export.resources import ModelResource from import_export.fields import Field import import_export.widgets as widgets from .models import StockLocation, StockItem, StockItemAttachment from .models import StockItemTracking from .models import StockItemTestResult from build.models import Build from company.models import SupplierPart from order.models import PurchaseOrder, SalesOrder from part.models import Part class LocationResource(ModelResource): """ Class for managing StockLocation data import/export """ parent = Field(attribute='parent', widget=widgets.ForeignKeyWidget(StockLocation)) parent_name = Field(attribute='parent__name', readonly=True) class Meta: model = StockLocation skip_unchanged = True report_skipped = False clean_model_instances = True exclude = [ # Exclude MPTT internal model fields 'lft', 'rght', 'tree_id', 'level', ] def after_import(self, dataset, result, using_transactions, dry_run, **kwargs): super().after_import(dataset, result, using_transactions, dry_run, **kwargs) # Rebuild the StockLocation tree(s) StockLocation.objects.rebuild() class LocationAdmin(ImportExportModelAdmin): resource_class = LocationResource list_display = ('name', 'pathstring', 'description') search_fields = ('name', 'description') class StockItemResource(ModelResource): """ Class for managing StockItem data import/export """ # Custom manaegrs for ForeignKey fields part = Field(attribute='part', widget=widgets.ForeignKeyWidget(Part)) part_name = Field(attribute='part__full_ame', readonly=True) supplier_part = Field(attribute='supplier_part', widget=widgets.ForeignKeyWidget(SupplierPart)) supplier = Field(attribute='supplier_part__supplier__id', readonly=True) supplier_name = Field(attribute='supplier_part__supplier__name', readonly=True) status_label = Field(attribute='status_label', readonly=True) location = Field(attribute='location', widget=widgets.ForeignKeyWidget(StockLocation)) location_name = Field(attribute='location__name', readonly=True) belongs_to = Field(attribute='belongs_to', widget=widgets.ForeignKeyWidget(StockItem)) build = Field(attribute='build', widget=widgets.ForeignKeyWidget(Build)) sales_order = Field(attribute='sales_order', widget=widgets.ForeignKeyWidget(SalesOrder)) build_order = Field(attribute='build_order', widget=widgets.ForeignKeyWidget(Build)) purchase_order = Field(attribute='purchase_order', widget=widgets.ForeignKeyWidget(PurchaseOrder)) # Date management updated = Field(attribute='updated', widget=widgets.DateWidget()) stocktake_date = Field(attribute='stocktake_date', widget=widgets.DateWidget()) def after_import(self, dataset, result, using_transactions, dry_run, **kwargs): super().after_import(dataset, result, using_transactions, dry_run, **kwargs) # Rebuild the StockItem tree(s) StockItem.objects.rebuild() class Meta: model = StockItem skip_unchanged = True report_skipped = False clean_model_instance = True class StockItemAdmin(ImportExportModelAdmin): resource_class = StockItemResource list_display = ('part', 'quantity', 'location', 'status', 'updated') class StockAttachmentAdmin(admin.ModelAdmin): list_display = ('stock_item', 'attachment', 'comment') class StockTrackingAdmin(ImportExportModelAdmin): list_display = ('item', 'date', 'title') class StockItemTestResultAdmin(admin.ModelAdmin): list_display = ('stock_item', 'test', 'result', 'value') admin.site.register(StockLocation, 
LocationAdmin) admin.site.register(StockItem, StockItemAdmin) admin.site.register(StockItemTracking, StockTrackingAdmin) admin.site.register(StockItemAttachment, StockAttachmentAdmin) admin.site.register(StockItemTestResult, StockItemTestResultAdmin)
Python
0
@@ -479,16 +479,25 @@ s import + Company, Supplie @@ -502,16 +502,16 @@ ierPart%0A - from ord @@ -1824,16 +1824,17 @@ t__full_ +n ame', re @@ -2018,32 +2018,118 @@ readonly=True)%0A%0A + customer = Field(attribute='customer', widget=widgets.ForeignKeyWidget(Company))%0A%0A supplier_nam @@ -2592,24 +2592,108 @@ et(Build))%0A%0A + parent = Field(attribute='parent', widget=widgets.ForeignKeyWidget(StockItem))%0A%0A sales_or @@ -3494,32 +3494,32 @@ skipped = False%0A - clean_mo @@ -3539,16 +3539,143 @@ = True%0A%0A + exclude = %5B%0A # Exclude MPTT internal model fields%0A 'lft', 'rght', 'tree_id', 'level',%0A %5D%0A%0A %0Aclass S
1323154dfbc453959f3d64fef439288004f6461e
add test for SyntaxError on def f(a): global a
Lib/test/test_compile.py
Lib/test/test_compile.py
from test_support import verbose, TestFailed if verbose: print 'Running test on duplicate arguments' try: exec('def f(a, a): pass') raise TestFailed, "duplicate arguments" except SyntaxError: pass try: exec('def f(a = 0, a = 1): pass') raise TestFailed, "duplicate keyword arguments" except SyntaxError: pass
Python
0.000193
@@ -78,31 +78,30 @@ test +s on -duplicate arguments +argument handling '%0A%0At @@ -312,28 +312,154 @@ xcept SyntaxError:%0A pass%0A +%0Atry:%0A exec('def f(a): global a; a = 1')%0A raise TestFailed, %22variable is global and local%22%0Aexcept SyntaxError:%0A pass%0A
69a735cd134723e4d47c02d21f4ff85a65d28148
enable test_main.py
Lib/test/test_lib2to3.py
Lib/test/test_lib2to3.py
# Skipping test_parser and test_all_fixers # because of running from lib2to3.tests import (test_fixers, test_pytree, test_util, test_refactor, test_parser) import unittest from test.test_support import run_unittest def suite(): tests = unittest.TestSuite() loader = unittest.TestLoader() for m in (test_fixers, test_pytree,test_util, test_refactor, test_parser): tests.addTests(loader.loadTestsFromModule(m)) return tests def test_main(): run_unittest(suite()) if __name__ == '__main__': test_main()
Python
0.000004
@@ -174,16 +174,41 @@ t_parser +, test_main as test_main_ )%0Aimport @@ -418,16 +418,42 @@ t_parser +,%0A test_main_ ):%0A
28d49f4fd9e403acf3318f1098387190581ce283
Remove extra blank line
dummyserver/testcase.py
dummyserver/testcase.py
import unittest import socket import threading from nose.plugins.skip import SkipTest from tornado import ioloop, web from dummyserver.server import ( SocketServerThread, run_tornado_app, run_loop_in_thread, DEFAULT_CERTS, ) from dummyserver.handlers import TestingApp from dummyserver.proxy import ProxyHandler def consume_socket(sock, chunks=65536): while not sock.recv(chunks).endswith(b'\r\n\r\n'): pass class SocketDummyServerTestCase(unittest.TestCase): """ A simple socket-based server is created for this class that is good for exactly one request. """ scheme = 'http' host = 'localhost' @classmethod def _start_server(cls, socket_handler): ready_event = threading.Event() cls.server_thread = SocketServerThread(socket_handler=socket_handler, ready_event=ready_event, host=cls.host) cls.server_thread.start() ready_event.wait(5) if not ready_event.is_set(): raise Exception("most likely failed to start server") cls.port = cls.server_thread.port @classmethod def start_response_handler(cls, response, num=1, block_send=None): ready_event = threading.Event() def socket_handler(listener): for _ in range(num): ready_event.set() sock = listener.accept()[0] consume_socket(sock) if block_send: block_send.wait() block_send.clear() sock.send(response) sock.close() cls._start_server(socket_handler) return ready_event @classmethod def start_basic_handler(cls, **kw): return cls.start_response_handler( b'HTTP/1.1 200 OK\r\n' b'Content-Length: 0\r\n' b'\r\n', **kw) @classmethod def tearDownClass(cls): if hasattr(cls, 'server_thread'): cls.server_thread.join(0.1) class IPV4SocketDummyServerTestCase(SocketDummyServerTestCase): @classmethod def _start_server(cls, socket_handler): ready_event = threading.Event() cls.server_thread = SocketServerThread(socket_handler=socket_handler, ready_event=ready_event, host=cls.host) cls.server_thread.USE_IPV6 = False cls.server_thread.start() ready_event.wait(5) if not ready_event.is_set(): raise Exception("most likely failed to start server") cls.port = cls.server_thread.port class HTTPDummyServerTestCase(unittest.TestCase): """ A simple HTTP server that runs when your test class runs Have your unittest class inherit from this one, and then a simple server will start when your tests run, and automatically shut down when they complete. For examples of what test requests you can send to the server, see the TestingApp in dummyserver/handlers.py. 
""" scheme = 'http' host = 'localhost' host_alt = '127.0.0.1' # Some tests need two hosts certs = DEFAULT_CERTS @classmethod def _start_server(cls): cls.io_loop = ioloop.IOLoop() app = web.Application([(r".*", TestingApp)]) cls.server, cls.port = run_tornado_app(app, cls.io_loop, cls.certs, cls.scheme, cls.host) cls.server_thread = run_loop_in_thread(cls.io_loop) @classmethod def _stop_server(cls): cls.io_loop.add_callback(cls.server.stop) cls.io_loop.add_callback(cls.io_loop.stop) cls.server_thread.join() @classmethod def setUpClass(cls): cls._start_server() @classmethod def tearDownClass(cls): cls._stop_server() class HTTPSDummyServerTestCase(HTTPDummyServerTestCase): scheme = 'https' host = 'localhost' certs = DEFAULT_CERTS class HTTPDummyProxyTestCase(unittest.TestCase): http_host = 'localhost' http_host_alt = '127.0.0.1' https_host = 'localhost' https_host_alt = '127.0.0.1' https_certs = DEFAULT_CERTS proxy_host = 'localhost' proxy_host_alt = '127.0.0.1' @classmethod def setUpClass(cls): cls.io_loop = ioloop.IOLoop() app = web.Application([(r'.*', TestingApp)]) cls.http_server, cls.http_port = run_tornado_app( app, cls.io_loop, None, 'http', cls.http_host) app = web.Application([(r'.*', TestingApp)]) cls.https_server, cls.https_port = run_tornado_app( app, cls.io_loop, cls.https_certs, 'https', cls.http_host) app = web.Application([(r'.*', ProxyHandler)]) cls.proxy_server, cls.proxy_port = run_tornado_app( app, cls.io_loop, None, 'http', cls.proxy_host) cls.server_thread = run_loop_in_thread(cls.io_loop) @classmethod def tearDownClass(cls): cls.io_loop.add_callback(cls.http_server.stop) cls.io_loop.add_callback(cls.https_server.stop) cls.io_loop.add_callback(cls.proxy_server.stop) cls.io_loop.add_callback(cls.io_loop.stop) cls.server_thread.join() class IPv6HTTPDummyServerTestCase(HTTPDummyServerTestCase): host = '::1' @classmethod def setUpClass(cls): if not socket.has_ipv6: raise SkipTest('IPv6 not available') else: super(IPv6HTTPDummyServerTestCase, cls).setUpClass() class IPv6HTTPDummyProxyTestCase(HTTPDummyProxyTestCase): http_host = 'localhost' http_host_alt = '127.0.0.1' https_host = 'localhost' https_host_alt = '127.0.0.1' https_certs = DEFAULT_CERTS proxy_host = '::1' proxy_host_alt = '127.0.0.1'
Python
0.99854
@@ -2686,17 +2686,16 @@ .port%0A%0A%0A -%0A class HT
92c123820be466ad76078537b457bb596b86c338
Put some meaningful standard includes in the sample configuration
dwight_chroot/config.py
dwight_chroot/config.py
import copy import os from .exceptions import ( CannotLoadConfiguration, InvalidConfiguration, NotRootException, UnknownConfigurationOptions, ) from .include import Include _USER_CONFIG_FILE_PATH = os.path.expanduser("~/.dwightrc") _USER_CONFIG_FILE_TEMPLATE = """# AUTOGENERATED DEFAULT CONFIG # ROOT_IMAGE = "/some/path/here" # INCLUDES = [Include("/dest/path/in/chroot", "/host/path")] # ENVIRON = {} # UID = None # None means taking the uid from SUDO_UID # PWD = os.path.abspath(".") """ class DwightConfiguration(object): def __init__(self): super(DwightConfiguration, self).__init__() self._config = dict( ROOT_IMAGE = None, INCLUDES = [], ENVIRON = {}, GID = None, UID = None, PWD = os.path.abspath("."), NUM_LOOP_DEVICES = None, ) self._known_keys = set(self._config) def __getitem__(self, key): return self._config[key] def __setitem__(self, key, value): if key not in self._known_keys: raise UnknownConfigurationOptions("Unknown configuration option: {0!r}".format(key)) self._config[key] = value def process_user_config_file(self, user_config_file_path=_USER_CONFIG_FILE_PATH): if not os.path.isfile(user_config_file_path): self._ensure_user_config_file(user_config_file_path) with open(user_config_file_path) as user_config_file: self.load_from_string(user_config_file) def _ensure_user_config_file(self, user_config_file_path): if not os.path.isdir(os.path.dirname(user_config_file_path)): os.makedirs(os.path.dirname(user_config_file_path)) with open(user_config_file_path, "w") as user_config_file: user_config_file.write(_USER_CONFIG_FILE_TEMPLATE) def _update_config(self, config): append_keys = ['INCLUDES'] appended_items = dict((append_key, config[append_key] + self._config[append_key]) for append_key in append_keys) self._config.update(config) self._config.update(appended_items) def load_from_string(self, s): d = copy.deepcopy(self._config) try: exec(s, {"Include" : Include}, d) except Exception as e: raise CannotLoadConfiguration("Cannot load configuration ({0})".format(e)) for key in list(d): if key.startswith("_") or not key[0].isupper(): d.pop(key) self._check_unknown_parameters(d) self._update_config(d) def _check_unknown_parameters(self, d): unknown = set(d) - self._known_keys if unknown: raise UnknownConfigurationOptions("Unknown configuration options: {0}".format(", ".join(map(repr, unknown)))) def check(self): if self._config.get("ROOT_IMAGE", None) is None: raise InvalidConfiguration("ROOT_IMAGE option is not set")
Python
0
@@ -372,41 +372,340 @@ e(%22/ -dest/path/in/chroot%22, %22/host/path +proc%22, %22/proc/%22),%0A# Include(%22/dev%22, %22/dev/%22),%0A# Include(%22/dev/pts%22, %22/dev/pts%22),%0A# Include(%22/run%22, %22/run%22),%0A# Include(%22/sys%22, %22/sys%22),%0A# Include(%22/home%22, %22/home/%22),%0A# Include(%22/etc/passwd%22, %22/etc/passwd%22),%0A# Include(%22/etc/group%22, %22/etc/group %22)%5D%0A @@ -803,16 +803,99 @@ th(%22.%22)%0A +# NUM_LOOP_DEVICES = 64 # The number of loop to ensure that exist before chrooting%0A %22%22%22%0A%0Acla
3bfd3ea70980acc02bf35b1654b6c616f4af45ec
update error page
chat/views.py
chat/views.py
import random import string from django.db import transaction from django.shortcuts import render, redirect import haikunator from .models import Room def about(request): return render(request, "chat/about.html") def home(request): return render(request, "chat/about.html") # def new_room(request): # """ # Randomly create a new room, and redirect to it. # """ # new_room = None # while not new_room: # with transaction.atomic(): # label = haikunator.haikunate() # if Room.objects.filter(label=label).exists(): # continue # new_room = Room.objects.create(label=label) # return redirect(chat_room, label=label) def create_room(request): #Create a new room for lang ren sha # if request.method == 'GET': return render(request, "chat/create_room.html", {}) else: label = request.POST['id'] if Room.objects.filter(label=label).exists(): return render(request, "chat/error.html", {'messages' : 'this name has been used'}) playNumber = 0 roleList = request.POST['cunmin'] + ',' + request.POST['langren'] playNumber = playNumber + int(request.POST['cunmin']) + int(request.POST['langren']) if request.POST.get('nvwu', False): roleList = roleList + ',' + '1' playNumber = playNumber + 1 else: roleList = roleList + ',' + '0' if request.POST.get('nvwu', False): roleList = roleList + ',' + '1' playNumber = playNumber + 1 else: roleList = roleList + ',' + '0' if request.POST.get('nvwu', False): roleList = roleList + ',' + '1' playNumber = playNumber + 1 else: roleList = roleList + ',' + '0' if request.POST.get('nvwu', False): roleList = roleList + ',' + '1' playNumber = playNumber + 1 else: roleList = roleList + ',' + '0' gameStart = 0 new_room = Room.objects.create(label=label, gameStart=gameStart, playerNumber=playNumber, roleList=roleList) return redirect(chat_room, label=label) def join_room(request): #Create a new room for lang ren sha # label = request.POST['label'] return redirect(chat_room, label=label) def chat_room(request, label): """ Room view - show the room, with latest messages. The template for this view has the WebSocket business to send and stream messages, so see the template for where the magic happens. """ # If the room with the given label doesn't exist, automatically create it # upon first visit (a la etherpad). room = Room.objects.filter(label=label).first() # We want to show the last 50 messages, ordered most-recent-last messages = reversed(room.messages.order_by('-timestamp')[:50]) return render(request, "chat/room.html", { 'room': room, 'messages': messages, })
Python
0.000001
@@ -2241,24 +2241,114 @@ n sha%0A #%0A + if request.method == 'GET':%0A return render(request, %22chat/join_room.html%22, %7B%7D)%0A label =
e5a3a49cfe6953160e6e3fbdf1ce9f55dafb2b40
Change data-checker messages to use Python logger
check_data.py
check_data.py
#!/usr/bin/env python from __future__ import print_function import argparse import os import sys from six import string_types import yaml # ABCs moved in Python 3, but six doesn't keep track of them. try: from collections.abc import Sequence except ImportError: from collections import Sequence REPO_ROOT = os.path.dirname(__file__) parser = argparse.ArgumentParser( description='Verify the format of a ' 'Welcome to Night Vale episode data file') parser.add_argument( '--data_file', '-d', type=argparse.FileType('r'), default=os.path.join(REPO_ROOT, 'episode_info.yaml'), help='YAML file with episode segment information') def check_overall_data_type(all_episode_data): """ The file should describe a list or other sequence. """ ok = ( isinstance(all_episode_data, Sequence) and not isinstance(all_episode_data, string_types)) if not ok: raise TypeError('Top-level data structure is not a list') def check_required_segment_data(segment): """ Make sure the segment has all required fields. """ try: title = segment['title'] except KeyError: raise KeyError('Segment is missing its title') if not isinstance(title, string_types): raise TypeError('Segment title must be a string') try: start = segment['start'] except KeyError: raise KeyError('Segment is missing its start time') if not isinstance(start, Sequence): raise TypeError('Segment start time must be a list of length 2') if len(start) < 2: raise TypeError('Segment start time must have two elements') try: start_minutes = float(start[0]) except ValueError: raise TypeError('Segment start minute must be castable to float') if start_minutes < 0: raise ValueError('Segment start minute must not be negative') try: start_seconds = float(start[1]) except ValueError: raise TypeError('Segment start second must be castable to float') if start_seconds < 0: raise ValueError('Segment start second must not be negative') def check_required_episode_data(episode): """ Make sure the episode has all required fields. """ try: episode_number = episode['episode_number'] except KeyError: raise KeyError('Episode is missing its episode number') if not ( isinstance(episode_number, int) or isinstance(episode_number, string_types)): raise TypeError('Episode number must be a string or an integer') try: title = episode['title'] except KeyError: raise KeyError('Episode is missing its title') if not isinstance(title, string_types): raise TypeError('Episode title must be a string') try: mp3_url = episode['mp3_url'] except KeyError: raise KeyError('Episode is missing its MP3 URL') if not isinstance(mp3_url, string_types): raise TypeError('Episode MP3 URL must be a string') try: segments = episode['segments'] except KeyError: raise KeyError('Episode is missing its segments') if not isinstance(segments, Sequence): raise TypeError('Episode MP3 URL must be a list') if not segments: raise ValueError('Episode must have at least one segment') for segment in segments: check_required_segment_data(segment) print(' Segment data OK for "{title}"'.format(**segment)) def main(*args): script_args = parser.parse_args(args) all_episode_data = yaml.safe_load(script_args.data_file) check_overall_data_type(all_episode_data) print('Overall data type OK\n') for episode in all_episode_data: check_required_episode_data(episode) print('Episode data OK for "{title}"\n'.format(**episode)) print('All OK!') if __name__ == '__main__': main(*sys.argv[1:])
Python
0
@@ -19,47 +19,8 @@ hon%0A -from __future__ import print_function%0A%0A impo @@ -30,16 +30,31 @@ argparse +%0Aimport logging %0Aimport @@ -630,24 +630,253 @@ rmation')%0A%0A%0A +logger = logging.getLogger(__name__)%0Alogger.setLevel(logging.DEBUG)%0Aformatter = logging.Formatter('%25(message)s')%0Astream_handler = logging.StreamHandler()%0Astream_handler.setFormatter(formatter)%0Alogger.addHandler(stream_handler)%0A%0A%0A def check_ov @@ -3624,21 +3624,27 @@ -print +logger.info (' Se @@ -3864,21 +3864,27 @@ ta)%0A -print +logger.info ('Overal @@ -3993,21 +3993,27 @@ -print +logger.info ('Episod @@ -4067,13 +4067,19 @@ -print +logger.info ('Al
a6acf8a68ee5b2ef185f279b6169a34c2b70896d
Increase feature version
acmd/__init__.py
acmd/__init__.py
# coding: utf-8 """ aem-cmd main module. """ __version__ = '0.11.1b' # Standard error codes that can be returned from any tool. OK = 0 UNCHANGED = 1 USER_ERROR = 4711 CONFIG_ERROR = 4712 SERVER_ERROR = 4713 INTERNAL_ERROR = 4714 import acmd.logger init_log = acmd.logger.init_log log = acmd.logger.log warning = acmd.logger.warning error = acmd.logger.error import acmd.server Server = acmd.server.Server import acmd.config read_config = acmd.config.read_config get_rcfilename = acmd.config.get_rcfilename import acmd.deploy setup_rcfile = acmd.deploy.setup_rcfile deploy_bash_completion = acmd.deploy.deploy_bash_completion get_current_version = acmd.deploy.get_current_version import acmd.props parse_properties = acmd.props.parse_properties import acmd.repo tool_repo = acmd.repo.tool_repo tool = acmd.repo.tool import_projects = acmd.repo.import_projects
Python
0
@@ -61,11 +61,11 @@ '0.1 -1.1 +2.0 b'%0A%0A
1ac48168baf8f18870bca0be2709838120eecd93
add validation to `offline.iplot`
plotly/offline/offline.py
plotly/offline/offline.py
""" Plotly Offline A module to use Plotly's graphing library with Python without connecting to a public or private plotly enterprise server. """ from __future__ import absolute_import import json import os import uuid from pkg_resources import resource_string import warnings from plotly import session, tools, utils from plotly.exceptions import PlotlyError __PLOTLY_OFFLINE_INITIALIZED = False def download_plotlyjs(download_url): warnings.warn(''' `download_plotlyjs` is deprecated and will be removed in the next release. plotly.js is shipped with this module, it is no longer necessary to download this bundle separately. ''', DeprecationWarning) pass def get_plotlyjs(): path = os.path.join('offline', 'plotly.min.js') plotlyjs = resource_string('plotly', path).decode('utf-8') return plotlyjs def init_notebook_mode(): """ Initialize Plotly Offline mode in an IPython Notebook. Run this function at the start of an IPython notebook to load the necessary javascript files for creating Plotly graphs with plotly.offline.iplot. """ if not tools._ipython_imported: raise ImportError('`iplot` can only run inside an IPython Notebook.') from IPython.display import HTML, display global __PLOTLY_OFFLINE_INITIALIZED __PLOTLY_OFFLINE_INITIALIZED = True display(HTML('<script type="text/javascript">' + # ipython's includes `require` as a global, which # conflicts with plotly.js. so, unrequire it. 'require=requirejs=define=undefined;' + '</script>' + '<script type="text/javascript">' + get_plotlyjs() + '</script>')) def iplot(figure_or_data, show_link=True, link_text='Export to plot.ly'): """ Draw plotly graphs inside an IPython notebook without connecting to an external server. To save the chart to Plotly Cloud or Plotly Enterprise, use `plotly.plotly.iplot`. To embed an image of the chart, use `plotly.image.ishow`. figure_or_data -- a plotly.graph_objs.Figure or plotly.graph_objs.Data or dict or list that describes a Plotly graph. See https://plot.ly/python/ for examples of graph descriptions. Keyword arguments: show_link (default=True) -- display a link in the bottom-right corner of of the chart that will export the chart to Plotly Cloud or Plotly Enterprise link_text (default='Export to plot.ly') -- the text of export link Example: ``` from plotly.offline import init_notebook_mode, iplot init_notebook_mode() iplot([{'x': [1, 2, 3], 'y': [5, 2, 7]}]) ``` """ if not __PLOTLY_OFFLINE_INITIALIZED: raise PlotlyError('\n'.join([ 'Plotly Offline mode has not been initialized in this notebook. 
' 'Run: ', '', 'import plotly', 'plotly.offline.init_notebook_mode() ' '# run at the start of every ipython notebook', ])) if not tools._ipython_imported: raise ImportError('`iplot` can only run inside an IPython Notebook.') from IPython.display import HTML, display if isinstance(figure_or_data, dict): data = figure_or_data['data'] layout = figure_or_data.get('layout', {}) else: data = figure_or_data layout = {} width = layout.get('width', '100%') height = layout.get('height', 525) try: float(width) except (ValueError, TypeError): pass else: width = str(width) + 'px' try: float(width) except (ValueError, TypeError): pass else: width = str(width) + 'px' plotdivid = uuid.uuid4() jdata = json.dumps(data, cls=utils.PlotlyJSONEncoder) jlayout = json.dumps(layout, cls=utils.PlotlyJSONEncoder) if show_link is False: link_text = '' plotly_platform_url = session.get_session_config().get('plotly_domain', 'https://plot.ly') if (plotly_platform_url != 'https://plot.ly' and link_text == 'Export to plot.ly'): link_domain = plotly_platform_url\ .replace('https://', '')\ .replace('http://', '') link_text = link_text.replace('plot.ly', link_domain) display(HTML( '<script type="text/javascript">' 'window.PLOTLYENV=window.PLOTLYENV || {};' 'window.PLOTLYENV.BASE_URL="' + plotly_platform_url + '";' 'Plotly.LINKTEXT = "' + link_text + '";' '</script>' )) script = '\n'.join([ 'Plotly.plot("{id}", {data}, {layout}).then(function() {{', ' $(".{id}.loading").remove();', '}})' ]).format(id=plotdivid, data=jdata, layout=jlayout, link_text=link_text) display(HTML('' '<div class="{id} loading" style="color: rgb(50,50,50);">' 'Drawing...</div>' '<div id="{id}" style="height: {height}; width: {width};" ' 'class="plotly-graph-div">' '</div>' '<script type="text/javascript">' '{script}' '</script>' ''.format(id=plotdivid, script=script, height=height, width=width))) def plot(): """ Configured to work with localhost Plotly graph viewer """ raise NotImplementedError
Python
0
@@ -1826,16 +1826,41 @@ plot.ly' +,%0A validate=True ):%0A %22 @@ -2676,16 +2676,388 @@ ort link +%0A validate (default=True) -- validate that all of the keys in the figure%0A are valid? omit if your version of plotly.js%0A has become outdated with your version of%0A graph_reference.json or if you need to include%0A extra, unnecessary keys in your figure. %0A%0A Ex @@ -3731,212 +3731,120 @@ -if isinstance(figure_or_data, dict):%0A data = figure_or_data%5B'data'%5D%0A layout = figure_or_data.get('layout', %7B%7D)%0A else:%0A data = figure_or_data%0A +figure = tools.return_figure_from_figure_or_data(figure_or_data, validate)%0A%0A width = figure.get(' layout - = %7B%7D%0A%0A width = layout +', %7B%7D) .get @@ -3874,22 +3874,40 @@ eight = -layout +figure.get('layout', %7B%7D) .get('he @@ -4218,20 +4218,38 @@ n.dumps( -data +figure.get('data', %5B%5D) , cls=ut @@ -4296,22 +4296,40 @@ n.dumps( -layout +figure.get('layout', %7B%7D) , cls=ut
61c9d4f6798d81d6ae6d2e5641a8432121de52fa
Implement function: get_network_adapters
cloudbaseinit/osutils/freebsd.py
cloudbaseinit/osutils/freebsd.py
from cloudbaseinit.osutils import base import subprocess class FreeBSDUtils(base.BaseOSUtils): def reboot(self): if ( os.system('reboot') != 0 ): raise Exception('Reboot failed') def user_exists(self, username): try: subprocess.check_output(["id", username]) except CalledProcessError: return False return True # not completed def create_user(self, username, password, invite_group, password_expires=False): """ invite_group must be a list of string. """ home_dir = '/home/' + username user_shell = '/bin/tcsh' user_comment = 'Created by bsdcloud-init' grouplist = '' assert isinstance(invite_group, list), "invite_group must be a list." assert invite_group, "invite_group cannot be empty." for i in invite_group: grouplist += i+',' grouplist = grouplist[:-1] pw_cmd = "echo " + password + " | pw useradd -n " + username + " -c '" + user_comment + "' -d '" + user_shell + "' -s /bin/tcsh -h 0 -G " + grouplist subprocess.check_call(pw_cmd, shell=True) subprocess.check_call("mkdir %s" % (home_dir), shell=True) subprocess.check_call("chown -R %s:%s %s" % (username, username, home_dir), shell=True) def set_host_name(self, new_host_name): try: subprocess.check_output(["hostname", new_host_name]) cmd_newhost = "[ -z `egrep '^hostname' /etc/rc.conf` ] && { echo 'hostname=\"%s\"' >> /etc/rc.conf } || { sed -e 's/^hostname=.*$/hostname=\"%s\"/' -I '' /etc/rc.conf }" % (new_host_name, new_host_name) subprocess.check_output(cmd_newhost, shell=True) return False except CalledProcessError: raise Exception(CalledProcessError.output) def sanitize_shell_input(self, value): pass def set_user_password(self, username, password): pw_cmd = "echo " + password + " | pw usermod -n " + username + " -h 0" subprocess.check_call(pw_cmd, shell=True) def add_user_to_local_group(self, username, groupname): pass def get_user_home(self, username): pass def get_network_adapters(self): pass def set_static_network_config(self, adapter_name, address, netmask, broadcast, gateway, dnsdomain, dnsnameservers): pass def set_config_value(self, name, value, section=None): pass def get_config_value(self, name, section=None): pass def wait_for_boot_completion(self): pass def terminate(self): pass def get_default_gateway(self): """ We cannot handle mutiple default gateway. """ interface = subprocess.check_output("route get default | grep interface", shell=True).split()[1] gateway_ip = subprocess.check_output("route get default | grep gateway", shell=True).split()[1] return (interface, gateway_ip) def check_static_route_exists(self, destination): pass def add_static_route(self, destination, mask, next_hop, interface_index, metric): pass def get_os_version(self): pass def get_volume_label(self, drive): pass
Python
0.999999
@@ -2235,36 +2235,198 @@ (self):%0A -pass +%22%22%22%0A This fucntion will return a list of interface.%0A %22%22%22%0A if_list = subprocess.check_output(%5B'ifconfig', '-l'%5D).split(' ')%0A return if_list %0A%0A def set_st
7f1af658690d284946c6b25172d215c79f61e6ea
use adapter in test
corehq/apps/userreports/tests/test_data_source_repeats.py
corehq/apps/userreports/tests/test_data_source_repeats.py
import json import os import datetime from django.test import SimpleTestCase, TestCase from corehq.apps.userreports.models import DataSourceConfiguration from corehq.apps.userreports.sql import IndicatorSqlAdapter from corehq.db import connection_manager DOC_ID = 'repeat-id' DAY_OF_WEEK = 'monday' class RepeatDataSourceTestMixin(object): def setUp(self): folder = os.path.join(os.path.dirname(__file__), 'data', 'configs') sample_file = os.path.join(folder, 'data_source_with_repeat.json') with open(sample_file) as f: self.config = DataSourceConfiguration.wrap(json.loads(f.read())) class RepeatDataSourceConfigurationTest(RepeatDataSourceTestMixin, SimpleTestCase): def test_test_doc_matches(self): self.assertTrue(self.config.filter(_test_doc())) def test_empty_doc_no_rows(self): self.assertEqual([], self.config.get_all_values(_test_doc())) def test_missing_property_no_rows(self): self.assertEqual([], self.config.get_all_values(_test_doc(form={}))) def test_null_property_no_rows(self): self.assertEqual([], self.config.get_all_values(_test_doc(form={"time_logs": None}))) def test_empty_list_property_no_rows(self): self.assertEqual([], self.config.get_all_values(_test_doc(form={"time_logs": []}))) def test_dict_property(self): start = datetime.datetime.utcnow() end = start + datetime.timedelta(minutes=30) rows = self.config.get_all_values(_test_doc(form={"time_logs": { "start_time": start, "end_time": end, "person": "al" }})) self.assertEqual(1, len(rows)) doc_id_ind, inserted_at, repeat_iteration, start_ind, end_ind, person_ind, created_base_ind = rows[0] self.assertEqual(DOC_ID, doc_id_ind.value) self.assertEqual(0, repeat_iteration.value) self.assertEqual(start, start_ind.value) self.assertEqual(end, end_ind.value) self.assertEqual('al', person_ind.value) self.assertEqual(DAY_OF_WEEK, created_base_ind.value) def test_list_property(self): now = datetime.datetime.utcnow() one_hour = datetime.timedelta(hours=1) logs = [ {"start_time": now, "end_time": now + one_hour, "person": "al"}, {"start_time": now + one_hour, "end_time": now + (one_hour * 2), "person": "chris"}, {"start_time": now + (one_hour * 2), "end_time": now + (one_hour * 3), "person": "katie"}, ] rows = self.config.get_all_values(_test_doc(form={"time_logs": logs})) self.assertEqual(len(logs), len(rows)) for i, row in enumerate(rows): doc_id_ind, inserted_at, repeat_iteration, start_ind, end_ind, person_ind, created_base_ind = row self.assertEqual(DOC_ID, doc_id_ind.value) self.assertEqual(logs[i]['start_time'], start_ind.value) self.assertEqual(i, repeat_iteration.value) self.assertEqual(logs[i]['end_time'], end_ind.value) self.assertEqual(logs[i]['person'], person_ind.value) self.assertEqual(DAY_OF_WEEK, created_base_ind.value) class RepeatDataSourceBuildTest(RepeatDataSourceTestMixin, TestCase): def test_table_population(self): engine = connection_manager.get_engine() adapter = IndicatorSqlAdapter(self.config) # Delete and create table adapter.rebuild_table() # Create a doc now = datetime.datetime.now() one_hour = datetime.timedelta(hours=1) logs = [ {"start_time": now, "end_time": now + one_hour, "person": "al"}, {"start_time": now + one_hour, "end_time": now + (one_hour * 2), "person": "chris"}, {"start_time": now + (one_hour * 2), "end_time": now + (one_hour * 3), "person": "katie"}, ] doc = _test_doc(form={'time_logs': logs}) # Save this document into the table adapter.save(doc) # Get rows from the table with engine.connect() as connection: rows = connection.execute(adapter.get_table().select()) retrieved_logs = [ { 
'start_time': r[3], 'end_time': r[4], 'person': r[5], } for r in rows ] # Check those rows against the expected result self.assertItemsEqual( retrieved_logs, logs, "The repeat data saved in the data source table did not match the expected data!" ) def _test_doc(**extras): test_doc = { "_id": DOC_ID, "domain": "user-reports", "doc_type": "XFormInstance", "created": DAY_OF_WEEK } test_doc.update(extras) return test_doc
Python
0
@@ -211,49 +211,8 @@ ter%0A -from corehq.db import connection_manager%0A %0A%0ADO @@ -3207,57 +3207,8 @@ ):%0A%0A - engine = connection_manager.get_engine()%0A @@ -3901,83 +3901,15 @@ -with engine.connect() as connection:%0A rows = connection.execute( +rows = adap @@ -3920,25 +3920,22 @@ get_ -table().sel +query_obj ect() -) %0A @@ -4007,11 +4007,19 @@ ': r -%5B3%5D +.start_time ,%0A @@ -4049,11 +4049,17 @@ ': r -%5B4%5D +.end_time ,%0A @@ -4087,11 +4087,15 @@ ': r -%5B5%5D +.person ,%0A%0A @@ -4131,17 +4131,16 @@ %5D%0A -%0A
deb96907dc9c96e0ff8772d14cad765cc5e47602
improve crawler
crawler/pagecrawler/pagecrawler/spiders/article_spider.py
crawler/pagecrawler/pagecrawler/spiders/article_spider.py
import scrapy from pagecrawler.items import ArticlecrawlerItem from pagecrawler.model_article import Articles class ArticleSpider(scrapy.Spider): name = "articlespider" filename = "delicious_article_dataset.dat" # load url in bookmarks from dataset start_urls = [] crawled_urls = {} # url_count = 100 counter = 0 with open(filename, 'r') as f: for row in f: # url_count -= 1 # if url_count <= 0: # break fields = row.split("\t") if not fields[3].startswith("http"): continue start_urls.append(fields[3]) print "field:" + fields[3] print start_urls def parse(self, response): items = [] return self.parse_article(response) def parse_article(self, response): self.log("==========Scraping:========= " + response.url) item = ArticlecrawlerItem() ArticleSpider.counter += 1 item['link'] = response.url item['title'] = response.xpath('//title/text()').extract() item['summary'] = response.xpath('//meta[@name="description"]/@content').extract() item['keywords'] = response.xpath('//meta[@name="news_keywords"]/@content').extract() item['text'] = response.xpath('//body//p//text()').extract() self.log("=========filled in item for:========" + response.url) # e.g. "indexing function", link = item.[]('link') if len(item['title']) == 0: return title = item['title'][0] link = response.url.lower() if link.startswith("https://www.youtube.com/"): return if link in ArticleSpider.crawled_urls: return else: ArticleSpider.crawled_urls[link] = True if len(item['summary']) == 0: return summary = item['summary'][0].rstrip('\r\n') if len(summary) == 0: return keywords = "" if len(item['keywords']) > 0: keywords = ', '.join(item['keywords']) if len(item['text']) == 0: return text = ' '.join(item['text']) if len(text) < 10: return print "createing article" article = Articles.create(title=title, link=link, summary=summary, keywords=keywords, text=text) print "#################################" + str(ArticleSpider.counter) + "/" + str(len(ArticleSpider.start_urls)) + "###########################" # yield item
Python
0.000129
@@ -334,16 +334,40 @@ nter = 0 +%0A written_counter = 0 %0A%0A wi @@ -2263,16 +2263,59 @@ rticle%22%0A + ArticleSpider.written_counter += 1%0A @@ -2453,22 +2453,17 @@ ######## -###### + %22 + str( @@ -2531,16 +2531,76 @@ ls)) + %22 + written %22 + str(ArticleSpider.written_counter) + %22 ###### ########
5361eec646a2ba13c56a475273a4522e60c0eac1
Optimize rotmg command
Discord/cogs/rotmg.py
Discord/cogs/rotmg.py
import discord from discord.ext import commands from utilities import checks def setup(bot): bot.add_cog(RotMG(bot)) class RotMG(commands.Cog): def __init__(self, bot): self.bot = bot async def cog_check(self, ctx): return await checks.not_forbidden().predicate(ctx) @commands.group(aliases = ["realmofthemadgod"], invoke_without_command = True, case_insensitive = True) async def rotmg(self, ctx, player: str): '''Realm of the Mad God player information''' url = f"https://nightfirec.at/realmeye-api/" params = {"player": player} # http://webhost.ischool.uw.edu/~joatwood/realmeye_api/0.3/ async with ctx.bot.aiohttp_session.get(url, params = params) as resp: data = await resp.json() if "error" in data: await ctx.embed_reply("Error: " + data["error"]) return fields = [("Characters", data["chars"]), ("Total Fame", f"{data['fame']:,}"), ("Fame Rank", f"{data['fame_rank']:,}"), ("Class Quests Completed", data["rank"]), ("Account Fame", f"{data['account_fame']:,}"), ("Account Fame Rank", f"{data['account_fame_rank']:,}")] if created := data.get("created"): fields.append(("Created", created)) fields.extend((("Total Exp", f"{data['exp']:,}"), ("Exp Rank", f"{data['exp_rank']:,}"), ("Last Seen", data["player_last_seen"]))) if guild := data.get("guild"): fields.extend((("Guild", guild), ("Guild Position", data["guild_rank"]))) if data["desc1"] or data["desc2"] or data["desc3"]: fields.append(("Description", f"{data['desc1']}\n{data['desc2']}\n{data['desc3']}")) await ctx.embed_reply(title = data["player"], title_url = f"https://www.realmeye.com/player/{player}", description = "Donator" if data["donator"] == "true" else discord.Embed.Empty, fields = fields) @rotmg.command(name = "characters") async def rotmg_characters(self, ctx, player : str): '''Realm of the Mad God player characters information''' url = "https://nightfirec.at/realmeye-api/?player={}".format(player) # http://webhost.ischool.uw.edu/~joatwood/realmeye_api/0.3/ async with ctx.bot.aiohttp_session.get(url) as resp: data = await resp.json() if "error" in data: await ctx.embed_reply("Error: " + data["error"]) return embed = discord.Embed(title = "{}'s Characters".format(data["player"]), color = ctx.bot.bot_color) embed.set_author(name = ctx.author.display_name, icon_url = ctx.author.avatar_url) for character in data["characters"]: value = "Fame: {0[fame]:,}, Exp: {0[exp]:,}, Rank: {0[place]:,}, Class Quests Completed: {0[cqc]}, Stats Maxed: {0[stats_maxed]}".format(character) value += "\nHP: {0[hp]}, MP: {0[mp]}, Attack: {0[attack]}, Defense: {0[defense]}, Speed: {0[speed]}, Vitality: {0[vitality]}, Wisdom: {0[wisdom]}, Dexterity: {0[dexterity]}".format(character["stats"]) equips = [] for type, equip in character["equips"].items(): equips.append("{}: {}".format(type.capitalize(), equip)) value += '\n' + ", ".join(equips) value += "\nPet: {0[pet]}, Clothing Dye: {0[character_dyes][clothing_dye]}, Accessory Dye: {0[character_dyes][accessory_dye]}, Backpack: {0[backpack]}".format(character) value += "\nLast Seen: {0[last_seen]}, Last Server: {0[last_server]}".format(character) embed.add_field(name = "Level {0[level]} {0[class]}".format(character), value = value, inline = False) await ctx.send(embed = embed)
Python
0
@@ -728,32 +728,39 @@ or%22 in data:%0A%09%09%09 +return await ctx.embed_ @@ -792,26 +792,16 @@ rror%22%5D)%0A -%09%09%09return%0A %09%09fields
39cd42fa27b87b9b1604635236f8860759a4a8db
Set Dialog's orientation to vertical
ELiDE/ELiDE/dialog.py
ELiDE/ELiDE/dialog.py
"""Generic dialog boxes and menus, for in front of a Board Mostly these will be added as children of KvLayoutFront but you could use them independently if you wanted. """ from kivy.properties import DictProperty, ListProperty, StringProperty, NumericProperty, VariableListProperty from kivy.core.text import DEFAULT_FONT from kivy.uix.boxlayout import BoxLayout from kivy.uix.button import Button from kivy.uix.widget import Widget from kivy.lang import Builder class Box(Widget): padding = VariableListProperty([6, 6, 6, 6]) border = ListProperty([4, 4, 4, 4]) font_size = StringProperty('15sp') font_name = StringProperty(DEFAULT_FONT) background = StringProperty( 'atlas://data/images/defaulttheme/textinput') background_color = ListProperty([1, 1, 1, 1]) foreground_color = ListProperty([0, 0, 0, 1]) class MessageBox(Box): """Looks like a TextInput but doesn't accept any input. Does support styled text with BBcode. """ line_spacing = NumericProperty(0) text = StringProperty() class DialogMenu(Box, BoxLayout): """Some buttons that make the game do things.""" options = ListProperty() """List of pairs of (button_text, partial)""" funcs = DictProperty({}) """Dict of functions to be used in place of string partials in the options""" def on_options(self, *args): self.clear_widgets() for txt, part in self.options: if not callable(part): part = self.funcs[part] self.add_widget(Button(text=txt, on_press=part)) class Dialog(BoxLayout): """MessageBox with a DialogMenu beneath it""" message_kwargs = DictProperty({}) menu_kwargs = DictProperty({}) def on_message_kwargs(self, *args): for k, v in self.message_kwargs.items(): setattr(self.ids.msg, k, v) def on_menu_kwargs(self, *args): for k, v in self.menu_kwargs.items(): setattr(self.ids.menu, k, v) Builder.load_string(""" <MessageBox>: canvas.before: Color: rgba: self.background_color BorderImage: border: self.border pos: self.pos size: self.size source: self.background ScrollView: id: sv do_scroll_x: False size: root.width - root.border[1] - root.border[3], root.height - root.border[0] - root.border[2] Label: markup: True text: root.text font_name: root.font_name font_size: root.font_size line_spacing: root.line_spacing width: sv.width size_hint_y: None text_size: self.size <DialogMenu>: canvas.before: Color: rgba: self.background_color BorderImage: border: self.border pos: self.pos size: self.size source: self.background orientation: 'vertical' <Dialog>: MessageBox: id: msg ScrollView: DialogMenu: size_hint_y: None id: menu """) if __name__ == "__main__": from kivy.base import runTouchApp dia = Dialog( message_kwargs={'text': 'I am a dialog'}, menu_kwargs={'options': [('one', lambda: None), ('two', lambda: None)]} )
Python
0
@@ -2932,16 +2932,44 @@ ialog%3E:%0A + orientation: 'vertical'%0A Mess
69c590d7cf2d328b9e6ef63ddf49933e67df9614
fix typo
statsd/gauge.py
statsd/gauge.py
import statsd


class Gauge(statsd.Client):
    '''Class to implement a statd gauge
    '''

    def send(self, subname, value):
        '''Send the data to statsd via self.connection

        :keyword subname: The subname to report the data to (appended to the
            client name)
        :keyword value: The gauge value to send
        '''
        name = self._get_name(self.name, subname)
        self.logger.info('%s: %d', name, value)
        return statsd.Client._send(self, {name: '%d|g' % value})
Python
0.999991
@@ -69,16 +69,17 @@ t a stat +s d gauge%0A
b97edcc911419197099338085f0f2937286dead0
Bump version
galaxy/__init__.py
galaxy/__init__.py
# (c) 2012-2014, Ansible, Inc. <[email protected]>
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import os.path
import sys
import warnings

__version__ = '2.0.0'
__all__ = ['__version__']


def find_commands(management_dir):
    # Modified version of function from django/core/management/__init__.py.
    command_dir = os.path.join(management_dir, 'commands')
    commands = []
    try:
        for f in os.listdir(command_dir):
            if f.startswith('_'):
                continue
            elif f.endswith('.py') and f[:-3] not in commands:
                commands.append(f[:-3])
            elif f.endswith('.pyc') and f[:-4] not in commands:
                commands.append(f[:-4])
    except OSError:
        pass
    return commands


def prepare_env():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'galaxy.settings')
    local_site_packages = os.path.join(os.path.dirname(__file__), 'lib', 'site-packages')
    sys.path.insert(0, local_site_packages)
    from django.conf import settings
    if not settings.DEBUG:
        warnings.simplefilter('ignore', DeprecationWarning)
    import django.utils
    settings.version = __version__


def manage():
    # Prepare the galaxy environment.
    prepare_env()
    # Now run the command (or display the version).
    from django.core.management import execute_from_command_line
    if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'):
        sys.stdout.write('galaxy-%s\n' % __version__)
    else:
        execute_from_command_line(sys.argv)
Python
0
@@ -784,17 +784,17 @@ = '2.0. -0 +1 '%0A__all_
b99de7f7d91c1be99ee7ea04da997d48126fd08b
fix model finance
finance/models.py
finance/models.py
from django.db import models from datetime import datetime class TypeLaunch(models.Model): type_name = models.CharField(max_length=100, unique=True) class Provider(models.Model): description = models.CharField(max_length=100, unique=True) type_launch = models.ForeignKey(TypeLaunch, blank=True, null=True) date_last_purchase = models.DateTimeField('date last purchase') value_total = models.DecimalField(max_digits=5, decimal_places=2) class Extract(models.Model): date_launch = models.DateTimeField('date launch') launch = models.CharField(max_length=100) date_purchase = models.DateTimeField('date purchase') value_debit = models.DecimalField(max_digits=5, decimal_places=2) value_credit = models.DecimalField(max_digits=5, decimal_places=2) value_balance = models.DecimalField(max_digits=5, decimal_places=2) cancelled = models.BooleanField(default=True, db_index=True) provider = models.ForeignKey(Provider, blank=True, null=True) def str_to_date(self, date_launch, launch, year): #import pdb; pdb.set_trace() if launch.strip()[-3] == '/': date = launch.split('-')[-1].strip() date = date.replace('/','-') + '-' + str(year) return datetime.strptime(date, '%d-%m-%Y').date() def str_to_float(self, value): return float(value.replace(',','.')) def importer(self, path): with open(path, 'r') as ff: contents = ff.readlines() line = 0 extract = Extract() while line <= len(contents): date_launch, launch, value = contents[line].split(';') extract.date_launch = datetime.strptime(date_launch, '%d-%m-%Y').date() extract.launch = launch.strip() #.split('-')[:-1] year = extract.str_to_date(date_launch).year extract.date_purchase = extract.str_to_date(date_launch, launch, year) if extract.str_to_float(value) < 0: extract.value_debit = extract.str_to_float(value) extract.value_credit = 0 else: extract.value_debit = 0 extract.value_credit = extract.str_to_float(value) extract.value_balance = 0 extract.save() line += 1 ff.close()
Python
0.000001
@@ -343,36 +343,32 @@ se = models.Date -Time Field('date last @@ -501,36 +501,32 @@ ch = models.Date -Time Field('date laun @@ -611,12 +611,8 @@ Date -Time Fiel @@ -1020,22 +1020,19 @@ , launch -, year +='' ):%0A @@ -1038,123 +1038,170 @@ -#import pdb; pdb.set_trace()%0A if launch.strip()%5B-3%5D == '/':%0A date = launch.split('-')%5B-1%5D.strip() +date = date_launch.replace('/','-')%0A if not launch is '' and launch.strip()%5B-3%5D == '/':%0A year = datetime.strptime(date, '%25d-%25m-%25Y').date().year %0A @@ -1216,20 +1216,35 @@ date = -date +launch.strip()%5B-5:%5D .replace @@ -1723,56 +1723,39 @@ h = -datetime.strptime(date_launch, '%25d-%25m-%25Y').date( +extract.str_to_date(date_launch )%0A @@ -1824,69 +1824,8 @@ -1%5D%0A - year = extract.str_to_date(date_launch).year%0A @@ -1903,14 +1903,8 @@ unch -, year )%0A%0A @@ -2245,24 +2245,68 @@ balance = 0%0A + import pdb; pdb.set_trace()%0A
fab74bb78de0b2f54997198ed1837cf986bbdf3b
Fix ruby gems tests, again
tests/integration/modules/test_gem.py
tests/integration/modules/test_gem.py
# -*- coding: utf-8 -*- ''' Integration tests for Ruby Gem module ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt Testing libs from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.helpers import destructiveTest # Import salt libs import salt.utils.path # Import 3rd-party libs from tornado.httpclient import HTTPClient GEM = 'tidy' GEM_VER = '1.1.2' OLD_GEM = 'brass' OLD_VERSION = '1.0.0' NEW_VERSION = '1.2.1' GEM_LIST = [GEM, OLD_GEM] def check_status(): ''' Check the status of the rubygems source ''' try: return HTTPClient().fetch('https://rubygems.org').code == 200 except Exception: # pylint: disable=broad-except return False @destructiveTest @skipIf(not salt.utils.path.which('gem'), 'Gem is not available') class GemModuleTest(ModuleCase): ''' Validate gem module ''' def setUp(self): if check_status() is False: self.skipTest('External resource \'https://rubygems.org\' is not available') def test_install_uninstall(self): ''' gem.install gem.uninstall ''' # Remove gem if it is already installed if self.run_function('gem.list', [GEM]): self.run_function('gem.uninstall', [GEM]) self.run_function('gem.install', [GEM]) gem_list = self.run_function('gem.list', [GEM]) self.assertIn(GEM, gem_list) self.run_function('gem.uninstall', [GEM]) self.assertFalse(self.run_function('gem.list', [GEM])) def test_install_version(self): ''' gem.install rake version=11.1.2 ''' # Remove gem if it is already installed if self.run_function('gem.list', [GEM]): self.run_function('gem.uninstall', [GEM]) self.run_function('gem.install', [GEM], version=GEM_VER) gem_list = self.run_function('gem.list', [GEM]) self.assertIn(GEM, gem_list) self.assertIn(GEM_VER, gem_list[GEM]) self.run_function('gem.uninstall', [GEM]) self.assertFalse(self.run_function('gem.list', [GEM])) def test_list(self): ''' gem.list ''' self.run_function('gem.install', [' '.join(GEM_LIST)]) all_ret = self.run_function('gem.list') for gem in GEM_LIST: self.assertIn(gem, all_ret) single_ret = self.run_function('gem.list', [GEM]) self.assertIn(GEM, single_ret) self.run_function('gem.uninstall', [' '.join(GEM_LIST)]) def test_list_upgrades(self): ''' gem.list_upgrades ''' # install outdated gem self.run_function('gem.install', [OLD_GEM], version=OLD_VERSION) ret = self.run_function('gem.list_upgrades') self.assertIn(OLD_GEM, ret) self.run_function('gem.uninstall', [OLD_GEM]) def test_sources_add_remove(self): ''' gem.sources_add gem.sources_remove ''' source = 'http://gemcutter.org/' self.run_function('gem.sources_add', [source]) sources_list = self.run_function('gem.sources_list') self.assertIn(source, sources_list) self.run_function('gem.sources_remove', [source]) sources_list = self.run_function('gem.sources_list') self.assertNotIn(source, sources_list) def test_update(self): ''' gem.update ''' # Remove gem if it is already installed if self.run_function('gem.list', [OLD_GEM]): self.run_function('gem.uninstall', [OLD_GEM]) self.run_function('gem.install', [OLD_GEM], version=OLD_VERSION) gem_list = self.run_function('gem.list', [OLD_GEM]) self.assertEqual({OLD_GEM: [OLD_VERSION]}, gem_list) self.run_function('gem.update', [OLD_GEM]) gem_list = self.run_function('gem.list', [OLD_GEM]) self.assertEqual({OLD_GEM: [NEW_VERSION, OLD_VERSION]}, gem_list) self.run_function('gem.uninstall', [OLD_GEM]) self.assertFalse(self.run_function('gem.list', [OLD_GEM])) def test_update_system(self): ''' gem.update_system ''' ret = 
self.run_function('gem.update_system') self.assertTrue(ret)
Python
0
@@ -3060,22 +3060,34 @@ p:// -gemcutter +production.cf.rubygems .org -/ '%0A%0A
747fa98c7a9ec7906dfba44e4860d300825eee39
Drop Py2 and six on tests/integration/modules/test_key.py
tests/integration/modules/test_key.py
tests/integration/modules/test_key.py
# -*- coding: utf-8 -*-

from __future__ import absolute_import, print_function, unicode_literals

import re

import pytest

from tests.support.case import ModuleCase
from tests.support.helpers import slowTest


@pytest.mark.windows_whitelisted
class KeyModuleTest(ModuleCase):
    @slowTest
    def test_key_finger(self):
        """
        test key.finger to ensure we receive a valid fingerprint
        """
        out = self.run_function("key.finger")
        match = re.match("([0-9a-z]{2}:){15,}[0-9a-z]{2}$", out)
        self.assertTrue(match)

    @slowTest
    def test_key_finger_master(self):
        """
        test key.finger_master to ensure we receive a valid fingerprint
        """
        out = self.run_function("key.finger_master")
        match = re.match("([0-9a-z]{2}:){15,}[0-9a-z]{2}$", out)
        self.assertTrue(match)
Python
0
@@ -1,103 +1,4 @@ -# -*- coding: utf-8 -*-%0A%0Afrom __future__ import absolute_import, print_function, unicode_literals%0A%0A impo
be2a2ab6a67beef97e3c3cf42bd5eeea6c4e55cf
fix - use rpc_timeout as rpc timeout
test/functional/test_framework/test_node.py
test/functional/test_framework/test_node.py
#!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import errno import http.client import logging import os import subprocess import time from .util import ( assert_equal, get_rpc_proxy, rpc_url, ) from .authproxy import JSONRPCException class TestNode(): """A class for representing a bitcoind node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node To make things easier for the test writer, a bit of magic is happening under the covers. Any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir): self.index = i self.datadir = os.path.join(dirname, "node" + str(i)) self.rpchost = rpchost if timewait: self.rpc_timeout = timewait else: # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60 if binary is None: self.binary = os.getenv("BITCOIND", "bitcoind") else: self.binary = binary self.stderr = stderr self.coverage_dir = coverage_dir # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibity, they can just set the args property directly. self.extra_args = extra_args self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i] self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) def __getattr__(self, *args, **kwargs): """Dispatches any unrecognised messages to the RPC connection.""" assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection" return self.rpc.__getattr__(*args, **kwargs) def start(self): """Start the node.""" self.process = subprocess.Popen(self.args + self.extra_args, stderr=self.stderr) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): assert self.process.poll() is None, "bitcoind exited with status %i during initialization" % self.process.returncode try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, coveragedir=self.coverage_dir) self.rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up self.rpc_connected = True self.url = self.rpc.url self.log.debug("RPC successfully started") return except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase if e.error['code'] != -28: # RPC in warmup? raise # unknown JSON RPC exception except ValueError as e: # cookie file not found and no rpcuser or rpcassword. 
bitcoind still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) raise AssertionError("Unable to connect to bitcoind") def get_wallet_rpc(self, wallet_name): assert self.rpc_connected assert self.rpc wallet_path = "wallet/%s" % wallet_name return self.rpc / wallet_path def stop_node(self): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is not None: # process has stopped. Assert that it didn't return an error code. assert_equal(return_code, 0) self.running = False self.process = None self.log.debug("Node stopped") return True return False def node_encrypt_wallet(self, passphrase): """"Encrypts the wallet. This causes bitcoind to shutdown, so this method takes care of cleaning up resources.""" self.encryptwallet(passphrase) while not self.is_node_stopped(): time.sleep(0.1) self.rpc = None self.rpc_connected = False
Python
0
@@ -3170,16 +3170,42 @@ f.index, + timeout=self.rpc_timeout, coverag
6140507068c7a42a988bad951c1a6f120de741fb
Update cam_timeLapse_Threaded_upload.py
camera/timelapse/cam_timeLapse_Threaded_upload.py
camera/timelapse/cam_timeLapse_Threaded_upload.py
#!/usr/bin/env python2.7
import time
import os
from subprocess import call

UPLOAD_INTERVAL = 60

def upload_file(inpath, outpath):
    uploadCmd = "/home/pi/Dropbox-Uploader/dropbox_uploader.sh upload %s %s" % (inpath, outpath)
    call ([uploadCmd], shell=True)

while True:
    # record start_time
    start_time = time.time()

    # initiate the upload process
    inpath = "/home/pi/timelapse/latest/latest.jpg"
    outpath = "latest.jpg"
    if os.path.exists(inpath):
        upload_file(inpath,outpath)
        print "uploadThread: uploaded %s to %s" % (inpath,outpath)
    else:
        print "uploadThread: file %s does not exist, skipping" % (inpath)

    inpath = "/home/pi/timelapse/latest/latest.mp4"
    outpath = "latest.mp4"
    if os.path.exists(inpath):
        upload_file(inpath,outpath)
        print "uploadThread: uploaded %s to %s" % (inpath,outpath)
    else:
        print "uploadThread: file %s does not exist, skipping" % (inpath)

    # record end_time
    end_time = time.time()

    # determine elapsed time
    elapsed_time = end_time - start_time

    # determine how long to sleep
    sleep_time = UPLOAD_INTERVAL - elapsed_time

    # check for negative sleep request!
    if (sleep_time < 1):
        print "uploadThread: sleep_time < 1!!! (%s)" % sleep_time
        sleep_time = 1

    # sleep
    print "uploadThread: sleeping for %s seconds" % sleep_time
    time.sleep(sleep_time)
Python
0
@@ -69,16 +69,279 @@ t call%0A%0A +import sys%0A%0Aclass Logger(object):%0A def __init__(self):%0A self.terminal = sys.stdout%0A self.log = open(%22logfile.log%22, %22a%22)%0A%0A def write(self, message):%0A self.terminal.write(message)%0A self.log.write(message) %0A%0Asys.stdout = Logger()%0A%0A UPLOAD_I
16fca36c2032929589a718507a74c87bee52c161
move planarAxiPotential to top-level
galpy/potential.py
galpy/potential.py
from galpy.potential_src import Potential
from galpy.potential_src import planarPotential
from galpy.potential_src import linearPotential
from galpy.potential_src import verticalPotential
from galpy.potential_src import MiyamotoNagaiPotential
from galpy.potential_src import LogarithmicHaloPotential
from galpy.potential_src import DoubleExponentialDiskPotential
from galpy.potential_src import PowerSphericalPotential
from galpy.potential_src import TwoPowerSphericalPotential
from galpy.potential_src import plotRotcurve
from galpy.potential_src import plotEscapecurve
from galpy.potential_src import KGPotential
from galpy.potential_src import interpRZPotential
#
# Functions
#
evaluatePotentials= Potential.evaluatePotentials
evaluateDensities= Potential.evaluateDensities
evaluateRforces= Potential.evaluateRforces
evaluatephiforces= Potential.evaluatephiforces
evaluatezforces= Potential.evaluatezforces
RZToplanarPotential= planarPotential.RZToplanarPotential
RZToverticalPotential= verticalPotential.RZToverticalPotential
plotPotentials= Potential.plotPotentials
plotRotcurve= plotRotcurve.plotRotcurve
plotEscapecurve= plotEscapecurve.plotEscapecurve
#
# Classes
#
Potential= Potential.Potential
planarPotential= planarPotential.planarPotential
linearPotential= linearPotential.linearPotential
MiyamotoNagaiPotential= MiyamotoNagaiPotential.MiyamotoNagaiPotential
DoubleExponentialDiskPotential= DoubleExponentialDiskPotential.DoubleExponentialDiskPotential
LogarithmicHaloPotential= LogarithmicHaloPotential.LogarithmicHaloPotential
KeplerPotential= PowerSphericalPotential.KeplerPotential
PowerSphericalPotential= PowerSphericalPotential.PowerSphericalPotential
NFWPotential= TwoPowerSphericalPotential.NFWPotential
JaffePotential= TwoPowerSphericalPotential.JaffePotential
HernquistPotential= TwoPowerSphericalPotential.HernquistPotential
TwoPowerSphericalPotential= TwoPowerSphericalPotential.TwoPowerSphericalPotential
KGPotential= KGPotential.KGPotential
interpRZPotential= interpRZPotential.interpRZPotential
Python
0
@@ -1199,16 +1199,71 @@ tential%0A +planarAxiPotential= planarPotential.planarAxiPotential%0A planarPo
0a4265282f240dc52acac4347636417a14274ada
update dict list in mixfeatures
code/python/seizures/features/MixFeatures.py
code/python/seizures/features/MixFeatures.py
import numpy as np from seizures.features.FeatureExtractBase import FeatureExtractBase from seizures.features.ARFeatures import ARFeatures from seizures.features.FFTFeatures import FFTFeatures from seizures.features.PLVFeatures import PLVFeatures from seizures.features.RandomFeatures import RandomFeatures from seizures.features.XCHUHFeatures import XCHUHFeatures class MixFeatures(FeatureExtractBase): """ Class to concatenate output of individual feature classes. @author V&J """ def __init__(self, features_list): """ Wittawat: features_list is a list L of dictionaries D's where D is of the form {'name': 'Name of a class extending FeatureExtractBase', 'args': 'arguments (a kwargs dictionary) to class constructor'}. ? """ self.features_list = features_list def extract(self, instance): feature_class_dict = {"ARFeatures":ARFeatures, "FFTFeatures":FFTFeatures, "PLVFeatures":PLVFeatures, "RandomFeatures":RandomFeatures} extracted_features_list = [] for feature_string in self.features_list: if feature_string['name'] in feature_class_dict: kwargs = feature_string['args'] feature_object = feature_class_dict[feature_string['name']](**kwargs) extracted_features_list.append(np.hstack(feature_object.extract(instance))) #flattened else: print "feature not in list !!!" return np.hstack(extracted_features_list) #------- end of MixFeatures --------------- class StackFeatures(FeatureExtractBase): """ A meta feature generator which stacks features generated from other FeatureExtractBase's. Semantically this feature generator is the same as MixFeatures but directly takes in objects of subclass of FeatureExtractBase, unlike MixFeatures. (I am just not comfortable passing class handle and bunch of arguments) @author Wittawat """ def __init__(self, *feature_generators): """ Input: feature_generators: a list of objects of subclass of FeatureExtractBase """ self.feature_generators = feature_generators def extract(self, instance): extracted_features_list = [] for generator in self.feature_generators: # a feature vector assert(isinstance(generator, FeatureExtractBase)) feature = generator.extract(instance) extracted_features_list.append(np.hstack(feature)); return np.hstack(extracted_features_list) def __str__(self): subs = [str(e) for e in self.feature_generators] return 'Stack' + '(%s)'% (', '.join(subs))
Python
0
@@ -358,16 +358,133 @@ eatures%0A +from seizures.features.SEFeatures import SEFeatures%0Afrom seizures.features.LyapunovFeatures import LyapunovFeatures%0A%0A %0Aclass M @@ -1209,16 +1209,138 @@ Features +,%0A %22SEFeatures%22:SEFeatures,%0A %22LyapunovFeatures%22:LyapunovFeatures %7D%0A
198e5256063d43006b5c245866604f5bd746cfcd
Allow the plugin to be loaded from a query
plugins/Success/plugin.py
plugins/Success/plugin.py
### # Copyright (c) 2005, Daniel DiPaolo # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ### import supybot.conf as conf from supybot.commands import * import supybot.plugins as plugins import supybot.ircutils as ircutils from supybot.i18n import PluginInternationalization, internationalizeDocstring _ = PluginInternationalization('Success') class Success(plugins.ChannelIdDatabasePlugin): """This plugin was written initially to work with MoobotFactoids, the two of them to provide a similar-to-moobot-and-blootbot interface for factoids. Basically, it replaces the standard 'The operation succeeded.' messages with messages kept in a database, able to give more personable responses.""" def __init__(self, irc): self.__parent = super(Success, self) self.__parent.__init__(irc) self.target = None pluginSelf = self self.originalClass = conf.supybot.replies.success.__class__ class MySuccessClass(self.originalClass): def __call__(self): ret = pluginSelf.db.random(pluginSelf.target or 'private_query') if ret is None: try: self.__class__ = pluginSelf.originalClass ret = self() finally: self.__class__ = MySuccessClass else: ret = ret.text return ret def get(self, attr): if ircutils.isChannel(attr): pluginSelf.target = attr return self conf.supybot.replies.success.__class__ = MySuccessClass def die(self): self.__parent.die() conf.supybot.replies.success.__class__ = self.originalClass def inFilter(self, irc, msg): # We need the target, but we need it before Owner.doPrivmsg is called, # so this seems like the only way to do it. self.target = msg.args[0] return msg Success = internationalizeDocstring(Success) Class = Success # vim:set shiftwidth=4 softtabstop=8 expandtab textwidth=78:
Python
0
@@ -2554,44 +2554,27 @@ dom( -pluginSelf.target or 'private_query' +dynamic.msg.args%5B0%5D )%0A @@ -3237,226 +3237,8 @@ ss%0A%0A - def inFilter(self, irc, msg):%0A # We need the target, but we need it before Owner.doPrivmsg is called,%0A # so this seems like the only way to do it.%0A self.target = msg.args%5B0%5D%0A return msg%0A Succ @@ -3296,17 +3296,16 @@ uccess%0A%0A -%0A # vim:se
4c084313d2e27a620f194e6282a51aa1e94f7a35
Change chunk so it only takes an int
node/floor_divide.py
node/floor_divide.py
#!/usr/bin/env python
from nodes import Node


class FloorDiv(Node):
    char = "f"
    args = 2
    results = 1

    @Node.test_func([3,2], [1])
    @Node.test_func([6,-3], [-2])
    def func(self, a:Node.number,b:Node.number):
        """a/b. Rounds down, returns an int."""
        return a//b

    @Node.test_func(["test", "e"], [["t", "e", "st"]])
    def partition(self, string:str, sep:str):
        """Split the string at the first occurrence of sep,
        return a 3-list containing the part before the separator,
        the separator itself, and the part after the separator.
        If the separator is not found,
        return a 3-list containing the string itself,
        followed by two empty strings."""
        return [list(string.partition(sep))]

    @Node.test_func(["134", 1], [["134"]])
    @Node.test_func(["1234", 2], [["12", "34"]])
    @Node.test_func(["1234", 3], [["1", "2", "34"]])
    @Node.test_func([[4,8,15,16,23,42], 5], [[[4],[8],[15],[16],[23,42]]])
    def chunk(self, inp:Node.indexable, num:Node.number):
        """Return inp seperated into num groups"""
        rtn = []
        last = 0
        size = len(inp)//num
        for i in range(size, len(inp), size):
            rtn.append(inp[last:i])
            last = i
        if len(rtn) != num:
            rtn.append(inp[last:])
        else:
            rtn[-1] += inp[last:]
        if len(rtn):
            if isinstance(inp, str):
                rtn[-1] = "".join(rtn[-1])
            else:
                rtn[-1] = type(inp)(rtn[-1])
        return [rtn]
Python
0.000002
@@ -999,27 +999,19 @@ le, num: -Node.number +int ):%0A
002aa415c8711eb484688cd867a8a70907e5e545
Remove _py_version_switch
numba/core/typing/asnumbatype.py
numba/core/typing/asnumbatype.py
import inspect import typing as py_typing from numba.core.typing.typeof import typeof from numba.core import errors, types, utils def _py_version_switch(py_version, new_result, old_result): return new_result if utils.PYVERSION >= py_version else old_result class AsNumbaTypeRegistry: """ A registry for python typing declarations. This registry stores a lookup table for simple cases (e.g. int) and a list of functions for more complicated cases (e.g. generics like List[int]). The as_numba_type registry is meant to work statically on type annotations at compile type, not dynamically on instances at runtime. To check the type of an object at runtime, see numba.typeof. """ def __init__(self): self.lookup = { type(example): typeof(example) for example in [ 0, 0.0, complex(0), "numba", True, None, ] } self.functions = [self._builtin_infer, self._numba_type_infer] def _numba_type_infer(self, py_type): if isinstance(py_type, types.Type): return py_type def _builtin_infer(self, py_type): # The type hierarchy of python typing library changes in 3.7. generic_type_check = _py_version_switch( (3, 7), lambda x: isinstance(x, py_typing._GenericAlias), lambda _: True, ) if not generic_type_check(py_type): return list_origin = _py_version_switch((3, 7), list, py_typing.List) dict_origin = _py_version_switch((3, 7), dict, py_typing.Dict) set_origin = _py_version_switch((3, 7), set, py_typing.Set) tuple_origin = _py_version_switch((3, 7), tuple, py_typing.Tuple) if getattr(py_type, "__origin__", None) is py_typing.Union: if len(py_type.__args__) != 2: raise errors.TypingError( "Cannot type Union of more than two types") (arg_1_py, arg_2_py) = py_type.__args__ if arg_2_py is type(None): # noqa: E721 return types.Optional(self.infer(arg_1_py)) elif arg_1_py is type(None): # noqa: E721 return types.Optional(self.infer(arg_2_py)) else: raise errors.TypingError( "Cannot type Union that is not an Optional " f"(neither type type {arg_2_py} is not NoneType") if getattr(py_type, "__origin__", None) is list_origin: (element_py,) = py_type.__args__ return types.ListType(self.infer(element_py)) if getattr(py_type, "__origin__", None) is dict_origin: key_py, value_py = py_type.__args__ return types.DictType(self.infer(key_py), self.infer(value_py)) if getattr(py_type, "__origin__", None) is set_origin: (element_py,) = py_type.__args__ return types.Set(self.infer(element_py)) if getattr(py_type, "__origin__", None) is tuple_origin: tys = tuple(map(self.infer, py_type.__args__)) return types.BaseTuple.from_types(tys) def register(self, func_or_py_type, numba_type=None): """ Extend AsNumbaType to support new python types (e.g. a user defined JitClass). For a simple pair of a python type and a numba type, can use as a function register(py_type, numba_type). If more complex logic is required (e.g. for generic types), register can also be used as a decorator for a function that takes a python type as input and returns a numba type or None. """ if numba_type is not None: # register used with a specific (py_type, numba_type) pair. assert isinstance(numba_type, types.Type) self.lookup[func_or_py_type] = numba_type else: # register used as a decorator. assert inspect.isfunction(func_or_py_type) self.functions.append(func_or_py_type) def try_infer(self, py_type): """ Try to determine the numba type of a given python type. We first consider the lookup dictionary. If py_type is not there, we iterate through the registered functions until one returns a numba type. If type inference fails, return None. 
""" result = self.lookup.get(py_type, None) for func in self.functions: if result is not None: break result = func(py_type) if result is not None and not isinstance(result, types.Type): raise errors.TypingError( f"as_numba_type should return a numba type, got {result}" ) return result def infer(self, py_type): result = self.try_infer(py_type) if result is None: raise errors.TypingError( f"Cannot infer numba type of python type {py_type}" ) return result def __call__(self, py_type): return self.infer(py_type) as_numba_type = AsNumbaTypeRegistry()
Python
0.000549
@@ -120,148 +120,8 @@ ypes -, utils%0A%0A%0Adef _py_version_switch(py_version, new_result, old_result):%0A return new_result if utils.PYVERSION %3E= py_version else old_result %0A%0A%0Ac @@ -1092,586 +1092,79 @@ -# The type hierarchy of python typing library changes in 3.7.%0A generic_type_check = _py_version_switch(%0A (3, 7),%0A lambda x: isinstance(x, py_typing._GenericAlias),%0A lambda _: True,%0A )%0A if not generic_type_check(py_type):%0A return%0A%0A list_origin = _py_version_switch((3, 7), list, py_typing.List)%0A dict_origin = _py_version_switch((3, 7), dict, py_typing.Dict)%0A set_origin = _py_version_switch((3, 7), set, py_typing.Set)%0A tuple_origin = _py_version_switch((3, 7), tuple, py_typing.Tuple) +if not isinstance(py_type, py_typing._GenericAlias):%0A return %0A%0A @@ -1909,23 +1909,16 @@ is list -_origin :%0A @@ -2070,23 +2070,16 @@ is dict -_origin :%0A @@ -2251,23 +2251,16 @@ ) is set -_origin :%0A @@ -2408,23 +2408,16 @@ is tuple -_origin :%0A
30a581f8af6efe7a6d2ff3ce803b728558bdce1e
Fix message
vmssextn.py
vmssextn.py
# Python script to do read/install/delete VM extensions on VM Scale Sets # usage: vmssextn -r rgname -v vmssname [--delete extnname] [--add extnfile] [-y][--verbose] import argparse import json import sys import time import subscription import vmss import azurerm def main(): # create parser argParser = argparse.ArgumentParser() argParser.add_argument('--vmssname', '-s', required=True, action='store', help='VM Scale Set name') argParser.add_argument('--resourcegroup', '-r', required=True, dest='resource_group', action='store', help='Resource group name') argParser.add_argument('--delete', '-n', dest='extnname', action='store', help='Name of extension to delete') argParser.add_argument('--add', '-c', dest='extnfile', action='store', help='File containing extension defintion to add') argParser.add_argument('--verbose', '-v', action='store_true', default=False, help='Show additional information') argParser.add_argument('-y', dest='noprompt', action='store_true', default=False, help='Do not prompt for confirmation') args = argParser.parse_args() # switches to determine program behavior noprompt = args.noprompt # go ahead and upgrade without waiting for confirmation when True verbose = args.verbose # print extra status information when True vmssname = args.vmssname resource_group = args.resource_group if args.extnname is not None: extnname = args.extnname mode = 'delete' elif args.extnfile is not None: extnfile = args.extnfile mode = 'add' else: mode = 'report' # Load Azure app defaults try: with open('vmssconfig.json') as configFile: configData = json.load(configFile) except FileNotFoundError: print("Error: Expecting vmssconfig.json in current folder") sys.exit() sub = subscription.subscription(configData['tenantId'], configData['appId'], configData['appSecret'], configData['subscriptionId']) sub.get_vmss_list() current_vmss = vmss.vmss(vmssname, sub.vmssdict[vmssname], sub.sub_id, sub.access_token) # print(json.dumps(vmssmodel, sort_keys=False, indent=2, separators=(',', ': '))) # start by getting the extension list try: extnprofile = current_vmss.model['properties']['virtualMachineProfile']['extensionProfile'] except KeyError: if mode != 'add': print('Scale Set: ' + vmssname + ' does not have any extensions defined.') sys.exit() else: extnprofile = None if mode == 'report': # print selected details about each extension print('Found the following extensions in scale set: ' + vmssname) for extension in extnprofile['extensions']: print('\nName: ' + extension['name']) print('Type: ' + extension['properties']['type']) print('Publisher: ' + extension['properties']['publisher']) print('Version: ' + extension['properties']['typeHandlerVersion']) if verbose: print(json.dumps(extension, sort_keys=False, indent=2, separators=(',', ': '))) sys.exit() elif mode == 'delete': index = 0 extn_index = -1 for extension in extnprofile['extensions']: if extension['name'] == extnname: extn_index = index index += 1 if extn_index > -1: # delete the extension from the list del extnprofile['extensions'][extn_index] else: print('Extension ' + extnname + ' not found.') elif mode == 'add': # load the extension definition file try: with open(extnfile) as extension_file: extndata = json.load(extension_file) except FileNotFoundError: print("Error: Expecting ' + extnfile + ' in current folder (or absolute path)") sys.exit() if extnprofile is None: # create an extensionProfile extnprofile = {'extensions':[]} # add the extension definition to the list extnprofile['extensions'].append(extndata) # update and apply model with the 
new extensionProfile current_vmss.update_extns(extnprofile) print('VMSS update status: ' + str(current_vmss.status.status_code)) if current_vmss.status.status_code == 200: if current_vmss.upgradepolicy == 'Manual': print('Update in progress. Once model update completes, apply manualUpgrade to VMs.') print("Note: You won't be able re-add an extension of the same name until the new model is to all VMs.") else: print('Scale Set update in progress.') else: print(current_vmss.status.text) if __name__ == "__main__": main()
Python
0.000002
@@ -4727,16 +4727,24 @@ odel is +applied to all V
a9482327727be721ba74c0251ec17f63f79fb1d7
fix faulty merge
galpy/potential.py
galpy/potential.py
from galpy.potential_src import Potential from galpy.potential_src import planarPotential from galpy.potential_src import linearPotential from galpy.potential_src import verticalPotential from galpy.potential_src import MiyamotoNagaiPotential from galpy.potential_src import IsochronePotential from galpy.potential_src import LogarithmicHaloPotential from galpy.potential_src import DoubleExponentialDiskPotential from galpy.potential_src import PowerSphericalPotential from galpy.potential_src import PowerSphericalPotentialwCutoff from galpy.potential_src import TwoPowerSphericalPotential from galpy.potential_src import plotRotcurve from galpy.potential_src import plotEscapecurve from galpy.potential_src import KGPotential from galpy.potential_src import interpRZPotential from galpy.potential_src import DehnenBarPotential from galpy.potential_src import SteadyLogSpiralPotential from galpy.potential_src import TransientLogSpiralPotential from galpy.potential_src import MovingObjectPotential from galpy.potential_src import ForceSoftening from galpy.potential_src import EllipticalDiskPotential from galpy.potential_src import CosmphiDiskPotential from galpy.potential_src import RazorThinExponentialDiskPotential from galpy.potential_src import FlattenedPowerPotential from galpy.potential_src import SnapshotRZPotential from galpy.potential_src import BurkertPotential from galpy.potential_src import MN3ExponentialDiskPotential from galpy.potential_src import KuzminKutuzovStaeckelPotential # # Functions # evaluatePotentials= Potential.evaluatePotentials evaluateDensities= Potential.evaluateDensities evaluateRforces= Potential.evaluateRforces evaluatephiforces= Potential.evaluatephiforces evaluatezforces= Potential.evaluatezforces evaluateR2derivs= Potential.evaluateR2derivs evaluatez2derivs= Potential.evaluatez2derivs evaluateRzderivs= Potential.evaluateRzderivs RZToplanarPotential= planarPotential.RZToplanarPotential RZToverticalPotential= verticalPotential.RZToverticalPotential plotPotentials= Potential.plotPotentials plotDensities= Potential.plotDensities plotplanarPotentials= planarPotential.plotplanarPotentials plotlinearPotentials= linearPotential.plotlinearPotentials calcRotcurve= plotRotcurve.calcRotcurve vcirc= plotRotcurve.vcirc dvcircdR= plotRotcurve.dvcircdR epifreq= Potential.epifreq verticalfreq= Potential.verticalfreq flattening= Potential.flattening rl= Potential.rl omegac= Potential.omegac vterm= Potential.vterm lindbladR= Potential.lindbladR plotRotcurve= plotRotcurve.plotRotcurve calcEscapecurve= plotEscapecurve.calcEscapecurve vesc= plotEscapecurve.vesc plotEscapecurve= plotEscapecurve.plotEscapecurve evaluateplanarPotentials= planarPotential.evaluateplanarPotentials evaluateplanarRforces= planarPotential.evaluateplanarRforces evaluateplanarR2derivs= planarPotential.evaluateplanarR2derivs evaluateplanarphiforces= planarPotential.evaluateplanarphiforces evaluatelinearPotentials= linearPotential.evaluatelinearPotentials evaluatelinearForces= linearPotential.evaluatelinearForces PotentialError= Potential.PotentialError LinShuReductionFactor= planarPotential.LinShuReductionFactor nemo_accname= Potential.nemo_accname nemo_accpars= Potential.nemo_accpars # # Classes # Potential= Potential.Potential planarAxiPotential= planarPotential.planarAxiPotential planarPotential= planarPotential.planarPotential linearPotential= linearPotential.linearPotential MiyamotoNagaiPotential= MiyamotoNagaiPotential.MiyamotoNagaiPotential IsochronePotential= IsochronePotential.IsochronePotential 
DoubleExponentialDiskPotential= DoubleExponentialDiskPotential.DoubleExponentialDiskPotential LogarithmicHaloPotential= LogarithmicHaloPotential.LogarithmicHaloPotential KeplerPotential= PowerSphericalPotential.KeplerPotential PowerSphericalPotential= PowerSphericalPotential.PowerSphericalPotential PowerSphericalPotentialwCutoff= PowerSphericalPotentialwCutoff.PowerSphericalPotentialwCutoff NFWPotential= TwoPowerSphericalPotential.NFWPotential JaffePotential= TwoPowerSphericalPotential.JaffePotential HernquistPotential= TwoPowerSphericalPotential.HernquistPotential TwoPowerSphericalPotential= TwoPowerSphericalPotential.TwoPowerSphericalPotential KGPotential= KGPotential.KGPotential interpRZPotential= interpRZPotential.interpRZPotential DehnenBarPotential= DehnenBarPotential.DehnenBarPotential SteadyLogSpiralPotential= SteadyLogSpiralPotential.SteadyLogSpiralPotential TransientLogSpiralPotential= TransientLogSpiralPotential.TransientLogSpiralPotential MovingObjectPotential= MovingObjectPotential.MovingObjectPotential EllipticalDiskPotential= EllipticalDiskPotential.EllipticalDiskPotential LopsidedDiskPotential= CosmphiDiskPotential.LopsidedDiskPotential CosmphiDiskPotential= CosmphiDiskPotential.CosmphiDiskPotential RazorThinExponentialDiskPotential= RazorThinExponentialDiskPotential.RazorThinExponentialDiskPotential FlattenedPowerPotential= FlattenedPowerPotential.FlattenedPowerPotential InterpSnapshotRZPotential = SnapshotRZPotential.InterpSnapshotRZPotential SnapshotRZPotential = SnapshotRZPotential.SnapshotRZPotential BurkertPotential= BurkertPotential.BurkertPotential KuzminKutuzovStaeckelPotential = KuzminKutuzovStaeckelPotential.KuzminKutuzovStaeckelPotential #Softenings PlummerSoftening= ForceSoftening.PlummerSoftening # # Constants # MWPotential= [MiyamotoNagaiPotential(a=0.5,b=0.0375,normalize=.6), NFWPotential(a=4.5,normalize=.35), HernquistPotential(a=0.6/8,normalize=0.05)] # See Table 1 in galpy paper: Bovy (2014) MWPotential2014= [PowerSphericalPotentialwCutoff(normalize=0.05,alpha=1.8,rc=1.9/8.), MiyamotoNagaiPotential(a=3./8.,b=0.28/8.,normalize=0.6), NFWPotential(a=2.,normalize=0.35)]
Python
0.000002
@@ -5133,16 +5133,101 @@ tential%0A +MN3ExponentialDiskPotential= MN3ExponentialDiskPotential.MN3ExponentialDiskPotential%0A KuzminKu @@ -5313,16 +5313,17 @@ tential%0A +%0A #Softeni
942e3b183859623d2f2a6bf874f8d763e960ea5b
Print AST during integration test
tests/integration/test_integration.py
tests/integration/test_integration.py
import collections import io import json import os import pytest import glob import subprocess import thinglang from thinglang import run, utils BASE_PATH = os.path.dirname(os.path.abspath(__file__)) SEARCH_PATTERN = os.path.join(BASE_PATH, '**/*.thing') TestCase = collections.namedtuple('TestCase', ['code', 'metadata', 'name', 'bytecode_target']) def collect_tests(): for path in glob.glob(SEARCH_PATTERN, recursive=True): with open(path, 'r') as f: contents = f.read() metadata_start = contents.index('/*') + 2 metadata_end = contents.index('*/') metadata = json.loads(contents[metadata_start:metadata_end]) yield TestCase( contents[metadata_end + 2:], metadata, metadata.get('test_name') or '.'.join(path.replace('.thing', '').split(os.sep)[-2:]), path + 'c' ) def split_lines(param): return param.replace('\r', '').split('\n') @pytest.mark.parametrize('test_file', collect_tests(), ids=lambda x: x.name) def test_thing_program(test_file): expected_output = test_file.metadata['expected_output'] utils.print_header("Bytecode generation") bytecode = thinglang.compiler(test_file.code).compile().finalize() print(bytecode) utils.print_header('VM execution') with open(test_file.bytecode_target, 'wb') as f: f.write(bytecode) vm = subprocess.Popen(["thinglang", test_file.bytecode_target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = (stream.decode('utf-8').strip() for stream in vm.communicate()) print(stderr) utils.print_header('VM output') print(stdout) local = thinglang.run(test_file.code).output if not isinstance(expected_output, str): stdout = split_lines(stdout) local = split_lines(local) assert vm.returncode == 0, 'VM process crashed' assert local == expected_output, 'Execution engine output did not match expected output' assert stdout == expected_output, 'VM output did not match expected output'
Python
0.000001
@@ -1189,43 +1189,29 @@ der( -%22Bytecode generation%22)%0A bytecode +'Parsed AST')%0A ast = t @@ -1243,16 +1243,104 @@ le.code) +%0A print(ast.tree())%0A%0A utils.print_header(%22Bytecode generation%22)%0A bytecode = ast .compile
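Read literally, the compressed diff above makes the integration test parse once, print the AST, and then produce bytecode from the already-parsed tree. A rough sketch of only the changed portion of test_thing_program, assuming the thinglang package and its utils helper are importable exactly as in the original file (this is illustrative, not the project's actual test):

import thinglang
from thinglang import utils

def test_thing_program(test_file):
    # Parse first and show the tree, then reuse the parsed AST for bytecode
    # generation instead of re-running the compiler front end.
    utils.print_header('Parsed AST')
    ast = thinglang.compiler(test_file.code)
    print(ast.tree())

    utils.print_header('Bytecode generation')
    bytecode = ast.compile().finalize()
    print(bytecode)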
e032136bdb5dbc8dfede3dfef8f4ede735cc5f3a
fix spacing
volatile.py
volatile.py
#!/usr/bin/python2 # # Name: volatile # Auth: Gavin Lloyd <[email protected]> # Date: 21 Apr 2011 (last modified: 15 Nov 2012) # Desc: Simple ALSA status icon and volume control # import pygtk import gtk import alsaaudio import gobject import signal pygtk.require("2.0") PANEL_HEIGHT = 24 # in pixels, negative if panel is on the bottom WINDOW_OPACITY = 0.95 # UPDATE_INTERVAL = 250 # in ms VOLUME_WIDTH = 200 # in pixels VOLUME_HEIGHT = 25 # in pixels, adjust if the widget doesn't fit SCROLL_BY = 2 # increase to scroll "faster" def volatile(): init_volume() global icon icon = gtk.StatusIcon() icon.connect('activate', show_window) icon.connect('popup-menu', toggle_mute) icon.connect('scroll-event', on_scroll) icon.timeout = gobject.timeout_add(UPDATE_INTERVAL, update_all) update_all() icon.set_visible(1) gtk.main() # # create the slider and containing window # def init_volume(): global window window = gtk.Window(gtk.WINDOW_POPUP) window.set_opacity(WINDOW_OPACITY) global slider slider = gtk.HScale() slider.set_size_request(VOLUME_WIDTH, VOLUME_HEIGHT) slider.set_range(0, 100) slider.set_increments(-SCROLL_BY, 12) slider.set_draw_value(0) slider.connect('value-changed', on_slide) frame = gtk.Frame() frame.set_shadow_type(gtk.SHADOW_OUT) frame.add(slider) window.add(frame) # # icon was clicked, show the window or re-hide it if already visible # def show_window(widget): if window.get_property('visible'): window.hide() else: update_all() window.set_position(gtk.WIN_POS_MOUSE) window.move(window.get_position()[0], PANEL_HEIGHT) window.show_all() window.present() # # set the volume to some level bound by [0,100] # def set_volume(level): volume = int(level) if volume > 100: volume = 100 if volume < 0: volume = 0 mixer.setvolume(volume) update_all() def toggle_mute(widget, button, time): mixer.setmute(not mixer.getmute()[0]) update_all() # # event handler for the HScale being dragged # def on_slide(widget): volume = widget.get_value() set_volume(volume) # # event handler for scrolling while hovering the icon # def on_scroll(widget, event): volume = mixer.getvolume()[0] if event.direction == gtk.gdk.SCROLL_UP: set_volume(volume + (SCROLL_BY*2)) elif event.direction == gtk.gdk.SCROLL_DOWN: set_volume(volume - (SCROLL_BY*2)) # # updates the global mixer, moves slider and updates icon # def update_all(): global mixer mixer = alsaaudio.Mixer('Master', 0, 0) volume = mixer.getvolume()[0] muted = mixer.getmute()[0] slider.set_value(volume) if volume <= 0 or muted: icon.set_from_icon_name('audio-volume-muted') elif volume <= 20: icon.set_from_icon_name('audio-volume-off') elif volume <= 55: icon.set_from_icon_name('audio-volume-low') elif volume <= 90: icon.set_from_icon_name('audio-volume-medium') else: icon.set_from_icon_name('audio-volume-high') return True if __name__ == '__main__': signal.signal(signal.SIGINT, gtk.main_quit) volatile()
Python
0.000054
@@ -662,18 +662,16 @@ tivate', - show_wi @@ -2323,25 +2323,27 @@ + (SCROLL_BY -* + * 2))%0A elif e @@ -2415,17 +2415,19 @@ CROLL_BY -* + * 2))%0A%0A%0A#%0A
4fc0162c73178678281c0e09cf32ffefa4b7b923
Handle unavailable server
flasque/client.py
flasque/client.py
# -*- coding: utf8 -*- import json import Queue import requests import threading class ThreadQueue(threading.Thread): def __init__(self, api, qname, *args, **kwargs): super(ThreadQueue, self).__init__(*args, **kwargs) self.api = api self.qname = qname self.q = Queue.Queue() self.daemon = True self._stop = threading.Event() def run(self): raise NotImplementedError def get(self, *args, **kwargs): return self.q.get(*args, **kwargs) def put(self, *args, **kwargs): return self.q.put(*args, **kwargs) def task_done(self): return self.q.task_done() def stop(self): self._stop.set() def close(self): self._stop() self.join() class Producer(ThreadQueue): def run(self): while True: try: data = self.get(timeout=1) except Queue.Empty: pass else: requests.post(self.api + "/queue/" + self.qname, data=data) if self._stop.is_set(): return class Consumer(ThreadQueue): def run(self): while True: res = requests.get( self.api + "/queue/", params={"q": self.qname}, stream=True, ) for line in res.iter_lines(chunk_size=1): if self._stop.is_set(): return res = json.loads(line) self.q.put(res["data"]) self.q.join() requests.delete( self.api + "/queue/" + res["q"], params={"msgid": res["msgid"]}, ) class Connection(object): def __init__(self, api="http://localhost:5000"): self.api = api self.threads = [] super(Connection, self).__init__() def Producer(self, qname): producer = Producer(self.api, qname) producer.start() self.threads.append(producer) return producer def Consumer(self, *qname): consumer = Consumer(self.api, qname) consumer.start() self.threads.append(consumer) return consumer def close(self): for th in self.threads: th.stop() for th in self.threads: th.join() self.threads = [] def __enter__(self): return self def __exit__(self, type, value, traceback): self.close()
Python
0.000001
@@ -29,16 +29,28 @@ rt json%0A +import time%0A import Q @@ -418,33 +418,276 @@ -raise NotImplementedError +while True:%0A self.loop()%0A%0A @staticmethod%0A def make_request(func, *args, **kwargs):%0A while True:%0A try:%0A return func(*args, **kwargs)%0A except requests.exceptions.RequestException:%0A time.sleep(1) %0A%0A @@ -1048,35 +1048,36 @@ ):%0A%0A def -run +loop (self):%0A whil @@ -1068,32 +1068,8 @@ f):%0A - while True:%0A @@ -1089,20 +1089,16 @@ - data = s @@ -1116,20 +1116,16 @@ eout=1)%0A - @@ -1160,16 +1160,26 @@ - pass +%0A else: %0A @@ -1179,37 +1179,50 @@ se:%0A +s el -se: +f.make_request( %0A @@ -1235,17 +1235,34 @@ sts.post -( +,%0A self.api @@ -1287,16 +1287,32 @@ f.qname, +%0A data=da @@ -1313,22 +1313,32 @@ ata=data -) +, %0A + )%0A @@ -1369,28 +1369,24 @@ - - return%0A%0A%0Acla @@ -1412,35 +1412,36 @@ ueue):%0A%0A def -run +loop (self):%0A @@ -1436,35 +1436,48 @@ f):%0A -while True: +res = self.make_request( %0A @@ -1476,22 +1476,16 @@ - res = request @@ -1489,22 +1489,18 @@ ests.get -(%0A +,%0A @@ -1529,36 +1529,32 @@ /%22,%0A - - params=%7B%22q%22: sel @@ -1575,20 +1575,16 @@ - stream=T @@ -1592,34 +1592,26 @@ ue,%0A - )%0A +)%0A for @@ -1656,28 +1656,24 @@ - if self._sto @@ -1676,36 +1676,32 @@ _stop.is_set():%0A - @@ -1711,28 +1711,24 @@ urn%0A - res = json.l @@ -1738,20 +1738,16 @@ s(line)%0A - @@ -1774,28 +1774,24 @@ %22%5D)%0A - self.q.join( @@ -1789,24 +1789,50 @@ f.q.join()%0A + self.make_request(%0A r @@ -1822,32 +1822,33 @@ est(%0A + requests.delete( @@ -1846,22 +1846,18 @@ s.delete -(%0A +,%0A @@ -1897,36 +1897,32 @@ %22%5D,%0A - - params=%7B%22msgid%22: @@ -1937,20 +1937,16 @@ gid%22%5D%7D,%0A -
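The essence of this patch is a blocking retry wrapper: every HTTP call is repeated until the server answers, sleeping one second between attempts. A self-contained sketch of that helper, assuming only that requests is installed; the queue name in the trailing comment is made up, and the endpoint mirrors the record's default API URL:

import time
import requests

def make_request(func, *args, **kwargs):
    # Retry forever on connection problems so producer/consumer threads
    # survive a temporarily unavailable server instead of crashing.
    while True:
        try:
            return func(*args, **kwargs)
        except requests.exceptions.RequestException:
            time.sleep(1)

# A producer thread would then post through the wrapper, e.g.:
# make_request(requests.post, 'http://localhost:5000/queue/myqueue', data=b'payload')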
7dd10d88b89da4a10db45d6393fd05d0d2dc718e
Change get_check_by_name optional argument
pingdombackup/PingdomBackup.py
pingdombackup/PingdomBackup.py
from calendar import timegm from datetime import datetime, timedelta from .Pingdom import Pingdom from .Database import Database from .log import log class PingdomBackup: MAX_INTERVAL = 2764800 def __init__(self, email, password, app_key, database): self.pingdom = Pingdom(email, password, app_key) self.database = Database(database) def update_probes(self): # get the probe list log.info('Updating probe records.') resp_json = self.pingdom.api('GET', 'probes', params={'includedeleted': True}) probes = resp_json['probes'] for probe in probes: self.database.upsert_record('probes', probe) log.info('{0} {1} updated.'.format(len(probes), 'probe was' if len(probes) == 1 else 'probes were')) def update_checks(self): # get the checks list log.info('Updating check records.') resp_json = self.pingdom.api('GET', 'checks') checks = resp_json['checks'] for check in checks: del check['tags'] self.database.upsert_record('checks', check) log.info('{0} {1} updated.'.format(len(checks), 'check was' if len(checks) == 1 else 'checks were')) def get_check_by_name(self, name, from_api=True): if from_api: self.update_checks() return self.database.get_record('checks', where='name = ?', parameters=(name, )) def update_results(self, check): log.info('Checking for new results.') # get the most recent result time from the database results = self.database.get_records('results', order_by='time DESC', limit=1) if len(results) == 0: min_from_t = 0 else: # + 1 because we don't want to include the previous result min_from_t = results[0]['time'] + 1 to_t = timegm((datetime.now() + timedelta(days=2)).timetuple()) limit = 1000 last_count = limit all_results = [] while last_count == limit: # calculate the minimum bound from_t = max(to_t - self.MAX_INTERVAL, min_from_t) # get the next page resp_json = self.pingdom.api('GET', 'results/{0}'.format(check['id']), params={ 'to': to_t, 'from': from_t, 'limit': limit }) results = resp_json['results'] last_count = len(results) # inspect each row for result in results: result['id'] = None result['checkid'] = check['id'] # update the to_timestamp if result['time'] < to_t: to_t = result['time'] all_results.extend(results) # bulk insert all_results = sorted(all_results, key=lambda r: r['time']) log.info('{0} new {1} been found.'.format(len(all_results), 'record has' if len(all_results) == 1 else 'records have')) self.database.insert_records('results', all_results)
Python
0.000003
@@ -1237,80 +1237,10 @@ name -, from_api=True):%0A if from_api:%0A self.update_checks()%0A +): %0A @@ -2823,20 +2823,20 @@ ts), 're -cord +sult has' if @@ -2866,20 +2866,20 @@ else 're -cord +sult s have')
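After this change the lookup becomes a plain database read: the from_api flag and the implicit update_checks() call disappear, so callers refresh the check cache explicitly. Reconstructed from the diff, the method body shrinks to roughly the following (shown out of its class context, as a sketch rather than the exact file):

def get_check_by_name(self, name):
    # Pure lookup now: no hidden update_checks() round-trip to the API.
    return self.database.get_record('checks', where='name = ?',
                                    parameters=(name, ))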
522dcc5f4be7271f85736160f10664a3983285b4
delete events typo
event_review/admin.py
event_review/admin.py
import re from django.contrib import admin from django import forms from django.utils.html import format_html, mark_safe from event_store.models import Event from reviewer.filters import ReviewerOrganizationFilter, review_widget from huerta.filters import CollapsedListFilter def phone_format(phone): return format_html('<span style="white-space: nowrap">{}</span>', re.sub(r'^(\d{3})(\d{3})(\d{4})', '(\\1) \\2-\\3', phone)) def event_list_display(obj): scope = obj.political_scope_display() if scope: scope = ' ({})'.format(scope) return format_html(""" <div class="row"> <div class="col-md-6"> <h5>{title} ({pk})</h5> {private} <div><b>Host:</b> {host} ({host_is_confirmed})</div> <div><b>Where:</b>{political_scope} <div>{venue}</div> <div>{address}</div> <div>{city}, {state}</div> </div> <div><b>When:</b> {when}</div> <div><b>Attendees:</b> {attendee_count}{max_attendees}</div> <div><b>Description</b> {description}</div> </div> <div class="col-md-6"> <div><b>Private Phone:</b> {private_phone}</div> <div><b>Event Status:</b> {active_status}</div> {review_widget} </div> </div> """, title=obj.title, pk=obj.organization_source_pk, venue=obj.venue, address='%s %s' % (obj.address1, obj.address2), city=obj.city, state=obj.state, political_scope=scope, private_phone=phone_format(obj.private_phone), when=obj.starts_at.strftime('%c'), attendee_count=obj.attendee_count, max_attendees='/%s' % obj.max_attendees if obj.max_attendees else '', host_is_confirmed=mark_safe('<span style="color:green">confirmed</span>' if obj.host_is_confirmed else '<span style="color:red">unconfirmed</span>'), private=mark_safe('<div class="label label-danger">Private</div>') if obj.is_private else '', host=obj.organization_host, #review_status=obj.organization_status_review, #prep_status=obj.organization_status_prep, active_status=obj.status, review_widget=review_widget(obj), #notes=mark_safe('<textarea rows="5" class="form-control" readonly>%s</textarea>' % obj.notes) # if obj.notes else None, description = mark_safe('<textarea rows="5" class="form-control" readonly>%s</textarea>' % obj.public_description) if obj.public_description else None) @admin.register(Event) class EventAdmin(admin.ModelAdmin): change_list_template = "admin/change_list_filters_top.html" filters_collapsable = True filters_require_submit = True disable_list_headers = True list_striped = True list_display = (event_list_display,) list_filter = (ReviewerOrganizationFilter, ('organization_campaign', CollapsedListFilter), ('organization_status_review', CollapsedListFilter), ('organization_status_prep', CollapsedListFilter), ('state', CollapsedListFilter), ('is_private', CollapsedListFilter), ('starts_at', CollapsedListFilter), ('ends_at', CollapsedListFilter), ('attendee_count', CollapsedListFilter), ('status', CollapsedListFilter), ('host_is_confirmed', CollapsedListFilter)) list_display_links = None def get_actions(self, request): actions = super(EventAdmin, self).get_actions(request) if 'deleted_selected' in actions: del actions['delete_selected'] return actions def has_delete_permission(self, request, obj=None): return False def has_add_permission(self, request, obj=None): return False
Python
0.000022
@@ -3897,17 +3897,16 @@ 'delete -d _selecte
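The one-character fix matters because the guard never matched: 'deleted_selected' is not a real Django admin action key, so the bulk-delete action was never actually removed. A minimal stand-in for the admin class showing only the affected method (Django assumed installed; this is a sketch, not the full class from the record):

from django.contrib import admin

class EventAdmin(admin.ModelAdmin):
    def get_actions(self, request):
        actions = super(EventAdmin, self).get_actions(request)
        # The key checked must be the same key that gets deleted.
        if 'delete_selected' in actions:
            del actions['delete_selected']
        return actions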
0dd41b65aaa0798a7a72a0d61d746bfa29bc3aad
Allow POST of fly and worm donors
src/encoded/types/donor.py
src/encoded/types/donor.py
from ..schema_utils import ( load_schema, ) from ..contentbase import ( location, ) from .base import ( ACCESSION_KEYS, ALIAS_KEYS, Collection, paths_filtered_by_status, ) class DonorItem(Collection.Item): base_types = ['donor'] + Collection.Item.base_types embedded = set(['organism']) name_key = 'accession' keys = ACCESSION_KEYS + ALIAS_KEYS rev = { 'characterizations': ('donor_characterization', 'characterizes'), } template = { 'characterizations': ( lambda root, characterizations: paths_filtered_by_status(root, characterizations) ), } @location('mouse-donors') class MouseDonor(Collection): item_type = 'mouse_donor' schema = load_schema('mouse_donor.json') __acl__ = [] properties = { 'title': 'Mouse donors', 'description': 'Listing Biosample Donors', } class Item(DonorItem): def __ac_local_roles__(self): # Disallow lab submitter edits return {} @location('fly-donors') class FlyDonor(Collection): item_type = 'fly_donor' schema = load_schema('fly_donor.json') __acl__ = [] properties = { 'title': 'Fly donors', 'description': 'Listing Biosample Donors', } class Item(DonorItem): embedded = set(['organism', 'constructs', 'constructs.target']) @location('worm-donors') class WormDonor(Collection): item_type = 'worm_donor' schema = load_schema('worm_donor.json') __acl__ = [] properties = { 'title': 'Worm donors', 'description': 'Listing Biosample Donors', } class Item(DonorItem): embedded = set(['organism', 'constructs', 'constructs.target']) @location('human-donors') class HumanDonor(Collection): item_type = 'human_donor' schema = load_schema('human_donor.json') properties = { 'title': 'Human donors', 'description': 'Listing Biosample Donors', } class Item(DonorItem): pass
Python
0
@@ -1146,33 +1146,16 @@ .json')%0A - __acl__ = %5B%5D%0A prop @@ -1482,33 +1482,16 @@ .json')%0A - __acl__ = %5B%5D%0A prop
b9133e2fe7444b4449ab67f4d726c20ce5e21cd8
clean ups in presentation of names
gazetteer/admin.py
gazetteer/admin.py
from django.contrib import admin from django import forms from gazetteer.models import * from skosxl.models import Notation from .settings import TARGET_NAMESPACE_FT # Register your models here. # works for Dango > 1.6 class NameInline(admin.TabularInline): model = LocationName class LocationTypeInlineForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(LocationTypeInlineForm, self).__init__(*args, **kwargs) self.fields['locationType'].queryset = Notation.objects.filter(concept__scheme__uri = TARGET_NAMESPACE_FT[0:-1] ) class LocationTypeInline(admin.StackedInline) : model = Notation form = LocationTypeInlineForm class LocationAdmin(admin.ModelAdmin): search_fields = ['locationType__term','locationname__name'] inlines = [ NameInline, ] class NameFieldConfigInline(admin.TabularInline): model = NameFieldConfig extra = 1 class CodeFieldConfigInline(admin.TabularInline): model = CodeFieldConfig extra = 1 class LocationTypeFieldInline(admin.TabularInline): model = LocationTypeField class GazSourceConfigAdmin(admin.ModelAdmin): model = GazSourceConfig inlines = [ LocationTypeFieldInline, NameFieldConfigInline, CodeFieldConfigInline ] admin.site.register(GazSource); admin.site.register(GazSourceConfig,GazSourceConfigAdmin); admin.site.register(Location, LocationAdmin); admin.site.register(LocationName); admin.site.register(LinkSet);
Python
0.000007
@@ -278,16 +278,78 @@ tionName +%0A readonly_fields = %5B'nameUsed', 'namespace'%5D%0A extra = 0 %0A%0A %0Ac
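The patch tightens the LocationName inline: existing names are shown read-only and no blank extra rows are offered. A sketch of the patched inline under the assumption that the project's LocationName model is importable as in the record:

from django.contrib import admin
from gazetteer.models import LocationName

class NameInline(admin.TabularInline):
    model = LocationName
    # Names are displayed for reference only, and no empty forms are pre-added.
    readonly_fields = ['nameUsed', 'namespace']
    extra = 0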
af18499e0d2c2017a01772153dbfa78c962458fa
Make sure source is a sequence of nodes before calling isfortran.
numscons/core/extension_scons.py
numscons/core/extension_scons.py
#! /usr/bin/env python # Last Change: Sun Jan 06 09:00 PM 2008 J # Module for support to build python extension. scons specific code goes here. import sys from copy import copy from distutils.unixccompiler import UnixCCompiler from numscons.numdist import msvc_runtime_library from extension import get_pythonlib_dir, get_python_inc from misc import built_with_mstools, built_with_mingw, built_with_gnu_f77, \ get_pythonlib_name, isfortran def PythonExtension(env, target, source, *args, **kw): # XXX: Some things should not be set here... Actually, this whole # thing is a mess. def floupi(key): if env.has_key(key): narg = copy(env[key]) else: narg = [] if kw.has_key(key): narg.append(kw.pop(key)) return narg LINKFLAGS = floupi('LINKFLAGS') CPPPATH = floupi('CPPPATH') LIBPATH = floupi('LIBPATH') LIBS = floupi('LIBS') CPPPATH.append(get_python_inc()) if sys.platform == 'win32': if built_with_mstools(env): # XXX: We add the path where to find python lib (or any other # version, of course). This seems to be necessary for MS compilers. #env.AppendUnique(LIBPATH = get_pythonlib_dir()) LIBPATH.append(get_pythonlib_dir()) elif built_with_mingw(env): # XXX: this part should be moved elsewhere (mingw abstraction # for python) # This is copied from mingw32ccompiler.py in numpy.distutils # (not supported by distutils.) # Include the appropiate MSVC runtime library if Python was # built with MSVC >= 7.0 (MinGW standard is msvcrt) py_runtime_library = msvc_runtime_library() LIBPATH.append(get_pythonlib_dir()) if isfortran(source): LIBS.append(get_pythonlib_name()) else: LIBS.extend([get_pythonlib_name(), py_runtime_library]) elif sys.platform == "darwin": # XXX: When those should be used ? (which version of Mac OS X ?) LINKFLAGS.extend(['-undefined', 'dynamic_lookup']) else: pass # Use LoadableModule because of Mac OS X # ... but scons has a bug (#issue 1669) with mingw and Loadable # Module, so use SharedLibrary with mingw. if built_with_mingw(env): wrap = env.SharedLibrary(target, source, SHLIBPREFIX = '', #LDMODULESUFFIX = '$PYEXTSUFFIX', SHLIBSUFFIX = '$PYEXTSUFFIX', LINKFLAGS = LINKFLAGS, LIBS = LIBS, LIBPATH = LIBPATH, CPPPATH = CPPPATH, *args, **kw) else: wrap = env.LoadableModule(target, source, SHLIBPREFIX = '', LDMODULESUFFIX = '$PYEXTSUFFIX', SHLIBSUFFIX = '$PYEXTSUFFIX', LINKFLAGS = LINKFLAGS, LIBS = LIBS, LIBPATH = LIBPATH, CPPPATH = CPPPATH, *args, **kw) return wrap def createStaticExtLibraryBuilder(env): """This is a utility function that creates the StaticExtLibrary Builder in an Environment if it is not there already. If it is already there, we return the existing one.""" import SCons.Action try: static_extlib = env['BUILDERS']['StaticExtLibrary'] except KeyError: action_list = [ SCons.Action.Action("$ARCOM", "$ARCOMSTR") ] if env.Detect('ranlib'): ranlib_action = SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR") action_list.append(ranlib_action) static_extlib = SCons.Builder.Builder(action = action_list, emitter = '$LIBEMITTER', prefix = '$LIBPREFIX', suffix = '$LIBSUFFIX', src_suffix = '$OBJSUFFIX', src_builder = 'SharedObject') env['BUILDERS']['StaticExtLibrary'] = static_extlib return static_extlib
Python
0
@@ -1814,27 +1814,316 @@ -if isfortran(source +# XXX: this is really ugly. This is all because I am too lazy to do%0A # a real python extension builder, which I should really do at some%0A # point.%0A from SCons.Node.FS import default_fs%0A snodes = %5Bdefault_fs.Entry(s) for s in source%5D%0A if isfortran(snodes ):%0A
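The point of this patch is that isfortran() expects SCons node objects, while source may arrive as plain strings; each entry is therefore coerced through the default filesystem first. A sketch assuming SCons is installed and a build is running (so default_fs has been initialised); the helper name is made up:

from SCons.Node.FS import default_fs

def as_nodes(source):
    # Coerce file names (or existing nodes) into SCons FS entries so that
    # isfortran() can inspect them reliably.
    return [default_fs.Entry(s) for s in source]

# Inside PythonExtension() the check then reads:
#     if isfortran(as_nodes(source)):
#         LIBS.append(get_pythonlib_name())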
59da9b84c491fd5ca4f4c7add5891d5e9ee4b405
Make it work with astor 0.5 for now
flaws/asttools.py
flaws/asttools.py
import ast try: from ast import arg as ast_arg except ImportError: ast_arg = type('arg', (ast.AST,), {}) from funcy.py3 import lmap def is_write(node): return isinstance(node, (ast.Import, ast.ImportFrom, ast.ExceptHandler, ast.FunctionDef, ast.ClassDef, ast.arguments, ast_arg)) \ or isinstance(node.ctx, (ast.Store, ast.Del, ast.Param)) def is_read(node): return isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load) def is_use(node): return isinstance(node, ast.Name) \ and isinstance(node.ctx, (ast.Load, ast.Del)) def is_constant(node): return isinstance(node, ast.Name) and node.id.isupper() def is_param(node): return isinstance(node, ast.Name) and isinstance(node.ctx, ast.Param) \ or isinstance(node, (ast.arguments, ast_arg)) def is_import(node): return isinstance(node, (ast.Import, ast.ImportFrom)) def is_name(node, name): return isinstance(node, ast.Name) and node.id == name def ast_eval(node): if isinstance(node, (ast.List, ast.Tuple)): return lmap(ast_eval, node.elts) elif isinstance(node, ast.Str): return node.s elif isinstance(node, ast.Num): return node.n else: raise ValueError("Don't know how to eval %s" % node.__class__.__name__) def name_class(node): if isinstance(node, (ast.Import, ast.ImportFrom)): return 'import' elif isinstance(node, ast.FunctionDef): return 'function' elif isinstance(node, ast.ClassDef): return 'class' elif is_param(node): return 'param' else: return 'variable' def node_str(node): return '%s at %d:%d' % (name_class(node), node.lineno, node.col_offset) def nodes_str(nodes): return '[%s]' % ', '.join(map(node_str, nodes)) # Parse to AST import sys import inspect import textwrap def get_body_ast(func): return get_ast(func).body[0].body def get_ast(func): # Get function source source = inspect.getsource(func) source = textwrap.dedent(source) # Preserve line numbers source = '\n' * (func.__code__.co_firstlineno - 1) + source return ast.parse(source, func_file(func), 'single') def func_file(func): return getattr(sys.modules[func.__module__], '__file__', '<nofile>') # Code generation from astor.code_gen import SourceGenerator from termcolor import colored def to_source(node, indent_with=' ' * 4, add_line_information=False): """ A modified to_source() function from astor. """ generator = AnnotatedSourceGenerator(indent_with, add_line_information) generator.visit(node) return ''.join(str(s) for s in generator.result) class AnnotatedSourceGenerator(SourceGenerator): def visit(self, node): SourceGenerator.visit(self, node) if not isinstance(node, (ast.Num, ast.Str)) and hasattr(node, 'val'): self.write(colored(' (%s)' % node.val, 'green'))
Python
0
@@ -2307,16 +2307,25 @@ ration%0A%0A +try:%0A from ast @@ -2359,16 +2359,82 @@ nerator%0A +except ImportError:%0A from astor.codegen import SourceGenerator%0A from ter
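The compatibility shim simply tries the newer module path first and falls back to the old one, so both astor layouts end up binding the same name. As a sketch, assuming astor is installed in either version:

try:
    from astor.code_gen import SourceGenerator   # newer astor layout
except ImportError:
    from astor.codegen import SourceGenerator    # astor 0.5 layout

# Either way, the subclass in the record keeps working unchanged:
# class AnnotatedSourceGenerator(SourceGenerator): ...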
4ac7e5d15d3fba11ae37e5826ca6c7181539804b
Disable nested types tests affected by IMPALA-2295
tests/query_test/test_nested_types.py
tests/query_test/test_nested_types.py
#!/usr/bin/env python # Copyright (c) 2012 Cloudera, Inc. All rights reserved. import pytest from tests.common.test_vector import * from tests.common.impala_test_suite import * class TestNestedTypes(ImpalaTestSuite): @classmethod def get_workload(self): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestNestedTypes, cls).add_test_dimensions() cls.TestMatrix.add_constraint(lambda v: v.get_value('table_format').file_format == 'parquet') def test_scanner_basic(self, vector): """Queries that do not materialize arrays.""" self.run_test_case('QueryTest/nested-types-scanner-basic', vector) def test_scanner_array_materialization(self, vector): """Queries that materialize arrays.""" self.run_test_case('QueryTest/nested-types-scanner-array-materialization', vector) def test_scanner_multiple_materialization(self, vector): """Queries that materialize the same array multiple times.""" self.run_test_case('QueryTest/nested-types-scanner-multiple-materialization', vector) def test_scanner_position(self, vector): """Queries that materialize the artifical position element.""" self.run_test_case('QueryTest/nested-types-scanner-position', vector) def test_scanner_map(self, vector): """Queries that materialize maps. (Maps looks like arrays of key/value structs, so most map functionality is already tested by the array tests.)""" self.run_test_case('QueryTest/nested-types-scanner-maps', vector) def test_runtime(self, vector): """Queries that send collections through the execution runtime.""" self.run_test_case('QueryTest/nested-types-runtime', vector) def test_tpch(self, vector): """Queries over the larger nested TPCH dataset.""" # This test takes a long time (minutes), only run in exhaustive if self.exploration_strategy() != 'exhaustive': pytest.skip() self.run_test_case('QueryTest/nested-types-tpch', vector)
Python
0
@@ -1608,24 +1608,55 @@ runtime.%22%22%22%0A + pytest.skip(%22IMPALA-2295%22)%0A self.run @@ -1791,24 +1791,55 @@ dataset.%22%22%22%0A + pytest.skip(%22IMPALA-2295%22)%0A # This t
f5f0cc6998f28bee7ccdaf304d3bc5e7e45ab9a6
save memory allocation using kwarg `out`.
chainer/optimizer_hooks/gradient_hard_clipping.py
chainer/optimizer_hooks/gradient_hard_clipping.py
import chainer class GradientHardClipping(object): """Optimizer/UpdateRule hook function for gradient clipping. This hook function clips all gradient arrays to be within a lower and upper bound. Args: lower_bound (float): The lower bound of the gradient value. upper_bound (float): The upper bound of the gradient value. Attributes: ~optimizer_hooks.GradientHardClipping.lower_bound (float): The lower bound of the gradient value. ~optimizer_hooks.GradientHardClipping.upper_bound (float): The upper bound of the gradient value. ~optimizer_hooks.GradientHardClipping.timing (string): Specifies when this hook should be called by the Optimizer/UpdateRule. Valid values are 'pre' (before any updates) and 'post' (after any updates). ~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \ Specifies if this hook is called for each parameter (``True``) or only once (``False``) by an optimizer to which this hook is registered. This function does not expect users to switch the value from default one, which is `True`. .. versionadded:: 4.0.0 The *timing* parameter. """ name = 'GradientHardClipping' call_for_each_param = True timing = 'pre' def __init__(self, lower_bound, upper_bound): self.lower_bound = lower_bound self.upper_bound = upper_bound def __call__(self, rule, param): grad = param.grad if grad is None: return with chainer.using_device(param.device): param.grad = param.grad.clip(self.lower_bound, self.upper_bound)
Python
0
@@ -8,16 +8,44 @@ chainer%0A +from chainer import backend%0A %0A%0Aclass @@ -1853,69 +1853,416 @@ -param.grad = param.grad.clip(self.lower_bound, self.upper_boun +xp = param.device.xp%0A if xp == backend.chainerx %5C%0A or isinstance(param.grad, backend.intel64.mdarray):%0A param.grad = grad.clip(self.lower_bound, self.upper_bound)%0A else:%0A # Save on new object allocation when using numpy and cupy%0A # using kwarg %60out%60%0A xp.clip(grad, self.lower_bound, self.upper_bound, out=gra d)%0A
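For NumPy and CuPy arrays the patch clips in place via the out= keyword instead of allocating a new array; ChainerX and iDeep arrays keep the old .clip() path. The allocation-saving trick itself can be seen with plain NumPy (this demo is not chainer code):

import numpy as np

grad = np.array([-3.0, -0.5, 0.0, 0.7, 4.2])
lower_bound, upper_bound = -1.0, 1.0

# out=grad writes the clipped values into the existing buffer, so no new
# array is allocated for the result.
clipped = np.clip(grad, lower_bound, upper_bound, out=grad)
assert clipped is grad
print(grad)   # [-1.  -0.5  0.   0.7  1. ]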
8c17d2076d54864094c3cd8ee51d514bc806c913
bump version
flexx/__init__.py
flexx/__init__.py
""" `Flexx <https://flexx.readthedocs.io>`_ is a pure Python toolkit for creating graphical user interfaces (GUI's), that uses web technology for its rendering. Apps are written purely in Python; The `PScript <https://pscript.readthedocs.io>`_ transpiler generates the necessary JavaScript on the fly. You can use Flexx to create (cross platform) desktop applications, web applications, and export an app to a standalone HTML document. It also works in the Jupyter notebook. The docs are on `Readthedocs <http://flexx.readthedocs.io>`_, the code is on `Github <http://github.com/flexxui/flexx>`_, and there is a `demo server <http://demo.flexx.app>`_. Once you've got started, the most important page is probably the :doc:`Widget reference <ui/api>`. ---- For more information, see http://flexx.readthedocs.io. """ # NOTES ON DOCS: # There are 2 places that define the short summary of Flexx: the # __init__.py and the README.md. Their summaries should be kept equal. # The index.rst for the docs uses the summary from __init__.py (the # part after the "----" is stripped. The long-description for Pypi is # obtained by converting README.md to RST. __version__ = '0.7.1' # Assert compatibility import sys if sys.version_info < (3, 5): # pragma: no cover raise RuntimeError('Flexx needs at least Python 3.5') # Import config object from ._config import config # noqa from .util.logging import set_log_level # noqa set_log_level(config.log_level) del sys
Python
0
@@ -1169,11 +1169,11 @@ '0. -7.1 +8.0 '%0A%0A#
7ba77209687ae1bb1344cc09e3539f7e21bfe599
Improve test of csvstack --filenames.
tests/test_utilities/test_csvstack.py
tests/test_utilities/test_csvstack.py
#!/usr/bin/env python import sys import StringIO import unittest from csvkit import CSVKitReader from csvkit.utilities.stack import CSVStack class TestCSVStack(unittest.TestCase): def test_explicit_grouping(self): # stack two CSV files args = ["--groups", "asd,sdf", "-n", "foo", "examples/dummy.csv", "examples/dummy2.csv"] output_file = StringIO.StringIO() utility = CSVStack(args, output_file) utility.main() # verify the stacked file's contents input_file = StringIO.StringIO(output_file.getvalue()) reader = CSVKitReader(input_file) self.assertEqual(reader.next(), ["foo", "a", "b", "c"]) self.assertEqual(reader.next()[0], "asd") self.assertEqual(reader.next()[0], "sdf") def test_filenames_grouping(self): # stack two CSV files args = ["--filenames", "-n", "path", "examples/dummy.csv", "examples/dummy2.csv"] output_file = StringIO.StringIO() utility = CSVStack(args, output_file) utility.main() # verify the stacked file's contents input_file = StringIO.StringIO(output_file.getvalue()) reader = CSVKitReader(input_file) self.assertEqual(reader.next(), ["foo", "a", "b", "c"]) self.assertEqual(reader.next()[0], "asd") self.assertEqual(reader.next()[0], "sdf")
Python
0
@@ -1227,35 +1227,36 @@ eader.next(), %5B%22 -foo +path %22, %22a%22, %22b%22, %22c%22 @@ -1294,35 +1294,41 @@ der.next()%5B0%5D, %22 -asd +dummy.csv %22)%0A self. @@ -1342,31 +1342,38 @@ qual(reader.next()%5B0%5D, %22 -sdf +dummy2.csv %22)%0A%0A
2f2114b47618ef6435543c05d941d3191ef44d5c
refactor Valuation functions
FinSymPy/Valuation.py
FinSymPy/Valuation.py
from sympy.matrices import Determinant, Matrix


def terminal_value(
        cash_flows=Matrix([0.]),
        long_term_discount_rate=0.,
        long_term_growth_rate=0.):
    m, n = cash_flows.shape
    if m == 1:
        filter_vector = Matrix((n - 1) * [0] + [1])
        tv = Determinant(cash_flows * filter_vector)
    elif n == 1:
        filter_vector = Matrix([(m - 1) * [0] + [1]])
        tv = Determinant(filter_vector * cash_flows)
    return (1 + long_term_growth_rate) * tv / (long_term_discount_rate - long_term_growth_rate)


def present_value(amount=0., discount_rate=0., nb_periods=0.):
    return amount / ((1 + discount_rate) ** nb_periods)


def net_present_value(
        cash_flows=Matrix([0.]),
        discount_rate=0.):
    m, n = cash_flows.shape
    discount_rate_plus_1 = discount_rate + 1
    if m == 1:
        discount_vector = Matrix([discount_rate_plus_1 ** -i for i in range(n)])
        return Determinant(cash_flows * discount_vector)
    elif n == 1:
        discount_vector = Matrix([[discount_rate_plus_1 ** -i for i in range(m)]])
        return Determinant(discount_vector * cash_flows)
Python
0.000069
@@ -1,51 +1,4 @@ -from sympy.matrices import Determinant, Matrix%0A %0A%0Ade @@ -23,39 +23,37 @@ -cash_flows=Matrix(%5B0.%5D) +terminal_cash_flow=0. ,%0A @@ -78,18 +78,19 @@ nt_rate= -0 . +01 ,%0A @@ -126,319 +126,63 @@ -m, n = cash_flows.shape%0A if m == 1:%0A filter_vector = Matrix((n - 1) * %5B0%5D + %5B1%5D)%0A tv = Determinant(cash_flows * filter_vector)%0A elif n == 1:%0A filter_vector = Matrix(%5B(m - 1) * %5B0%5D + %5B1%5D%5D)%0A tv = Determinant(filter_vector * cash_flows)%0A return (1 + long_term_growth_rate) * tv +return (1 + long_term_growth_rate) * terminal_cash_flow / ( @@ -399,19 +399,11 @@ ows= -Matrix(%5B0.%5D +(0, ),%0A @@ -436,375 +436,131 @@ -m, n = cash_flows.shape%0A discount_rate_plus_1 = discount_rate + 1%0A if m == 1:%0A discount_vector = Matrix(%5Bdiscount_rate_plus_1 ** -i for i in range(n)%5D)%0A return Determinant(cash_flows * discount_vector)%0A elif n == 1:%0A discount_vector = Matrix(%5B%5Bdiscount_rate_plus_1 ** -i for i in range(m)%5D%5D)%0A return Determinant(discount_vector * +return reduce(%0A lambda x, y: x + y,%0A %5Bcash_flows%5Bi%5D / ((1 + discount_rate) ** i)%0A for i in range(len( cash @@ -566,9 +566,12 @@ h_flows) +)%5D) %0A
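After the refactor both functions work on plain Python numbers and sequences instead of sympy matrices. A runnable approximation of the new signatures reconstructed from the diff (the original targets Python 2, where reduce is a builtin, hence the functools import here):

from functools import reduce

def terminal_value(terminal_cash_flow=0.,
                   long_term_discount_rate=.01,
                   long_term_growth_rate=0.):
    return (1 + long_term_growth_rate) * terminal_cash_flow / \
        (long_term_discount_rate - long_term_growth_rate)

def net_present_value(cash_flows=(0,), discount_rate=0.):
    return reduce(
        lambda x, y: x + y,
        [cash_flows[i] / ((1 + discount_rate) ** i)
         for i in range(len(cash_flows))])

print(terminal_value(100., .08, .02))                      # 1700.0
print(round(net_present_value((-100., 60., 60.), .1), 2))  # 4.13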
b2485d7879ea00aa745ac735beb786e3dd51cb3b
Refactor LDAPClient.bind() so it acknowledges completion of the request.
ldaptor/protocols/ldap/ldapclient.py
ldaptor/protocols/ldap/ldapclient.py
# Twisted, the Framework of Your Internet # Copyright (C) 2001 Matthew W. Lefkowitz # # This library is free software; you can redistribute it and/or # modify it under the terms of version 2.1 of the GNU Lesser General Public # License as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """LDAP protocol client""" from ldaptor.protocols import pureldap, pureber from ldaptor.protocols.ldap import ldaperrors from twisted.python import log from twisted.python.failure import Failure from twisted.internet import protocol, defer class LDAPClientConnectionLostException(ldaperrors.LDAPException): def __str__(self): return 'Connection lost' class LDAPClient(protocol.Protocol): """An LDAP client""" debug = False def __init__(self): self.onwire = {} self.buffer = '' self.connected = None berdecoder = pureldap.LDAPBERDecoderContext_TopLevel( inherit=pureldap.LDAPBERDecoderContext_LDAPMessage( fallback=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()), inherit=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()))) def dataReceived(self, recd): self.buffer += recd while 1: try: o, bytes = pureber.berDecodeObject(self.berdecoder, self.buffer) except pureldap.BERExceptionInsufficientData: o, bytes = None, 0 self.buffer = self.buffer[bytes:] if not o: break self.handle(o) def connectionMade(self): """TCP connection has opened""" self.connected = 1 def connectionLost(self, reason=protocol.connectionDone): """Called when TCP connection has been lost""" self.connected = 0 def queue(self, op, handler=None, *args, **kwargs): if not self.connected: raise LDAPClientConnectionLostException() msg=pureldap.LDAPMessage(op) if self.debug: log.debug('C->S %s' % repr(msg)) assert not self.onwire.has_key(msg.id) assert op.needs_answer or handler is None assert ((args==() and kwargs=={}) or handler is not None) if op.needs_answer: self.onwire[msg.id]=(handler, args, kwargs) self.transport.write(str(msg)) def unsolicitedNotification(self, msg): log.msg("Got unsolicited notification: %s" % repr(msg)) def handle(self, msg): assert isinstance(msg.value, pureldap.LDAPProtocolResponse) if self.debug: log.debug('C<-S %s' % repr(msg)) if msg.id==0: self.unsolicitedNotification(msg.value) else: handler, args, kwargs = self.onwire[msg.id] # Return true to mark request as fully handled if handler is None or handler(msg.value, *args, **kwargs): del self.onwire[msg.id] ##Bind def bind(self, dn='', auth=''): d=defer.Deferred() if not self.connected: d.errback(Failure( LDAPClientConnectionLostException())) else: r=pureldap.LDAPBindRequest(dn=dn, auth=auth) self.queue(r, d.callback) #TODO queue needs info back from callback!!! 
d.addCallback(self._handle_bind_msg) return d def _handle_bind_msg(self, resp): assert isinstance(resp, pureldap.LDAPBindResponse) assert resp.referral is None #TODO if resp.resultCode==0: return (resp.matchedDN, resp.serverSaslCreds) else: raise Failure( ldaperrors.get(resp.resultCode, resp.errorMessage)) ##Unbind def unbind(self): if not self.connected: raise "Not connected (TODO)" #TODO make this a real object r=pureldap.LDAPUnbindRequest() self.queue(r) self.transport.loseConnection() class LDAPOperation: def __init__(self, client): self.client=client class LDAPSearch(LDAPOperation): def __init__(self, deferred, client, baseObject='', scope=pureldap.LDAP_SCOPE_wholeSubtree, derefAliases=pureldap.LDAP_DEREF_neverDerefAliases, sizeLimit=0, timeLimit=0, typesOnly=0, filter=pureldap.LDAPFilterMatchAll, attributes=[], ): LDAPOperation.__init__(self, client) self.deferred=deferred r=pureldap.LDAPSearchRequest(baseObject=str(baseObject), scope=scope, derefAliases=derefAliases, sizeLimit=sizeLimit, timeLimit=timeLimit, typesOnly=typesOnly, filter=filter, attributes=attributes) self.client.queue(r, self.handle_msg) def handle_msg(self, msg): if isinstance(msg, pureldap.LDAPSearchResultDone): assert msg.referral is None #TODO if msg.resultCode==0: #TODO ldap.errors.success assert msg.matchedDN=='' self.deferred.callback(self) else: try: raise ldaperrors.get(msg.resultCode, msg.errorMessage) except: self.deferred.errback(Failure()) return 1 else: assert isinstance(msg, pureldap.LDAPSearchResultEntry) self.handle_entry(msg.objectName, msg.attributes) return 0 def handle_entry(self, objectName, attributes): pass
Python
0.000171
@@ -3351,84 +3351,8 @@ (r, -d.callback) #TODO queue needs info back from callback!!!%0A%09 d.addCallback( self @@ -3368,16 +3368,19 @@ bind_msg +, d )%0A%09retur @@ -3419,16 +3419,19 @@ lf, resp +, d ):%0A%09asse @@ -3542,23 +3542,27 @@ 0:%0A%09 -return +d.callback( (resp.ma @@ -3591,16 +3591,17 @@ slCreds) +) %0A%09else:%0A @@ -3605,22 +3605,26 @@ e:%0A%09 -raise +d.errback( Failure( @@ -3677,16 +3677,37 @@ essage)) +)%0A return True %0A%0A ##
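The refactor stops handing d.callback straight to the wire layer; instead the response handler receives the Deferred as an extra argument, fires it with the bind result (or an errback), and returns True so the pending request is acknowledged and dropped. A toy version of that handler, with a plain dict standing in for an LDAPBindResponse (Twisted assumed installed):

from twisted.internet import defer
from twisted.python.failure import Failure

def handle_bind_msg(resp, d):
    # Fire the Deferred based on the response, then report the request as
    # fully handled by returning True.
    if resp['resultCode'] == 0:
        d.callback((resp['matchedDN'], resp['serverSaslCreds']))
    else:
        d.errback(Failure(RuntimeError(resp['errorMessage'])))
    return True

d = defer.Deferred()
d.addCallback(print)
handle_bind_msg({'resultCode': 0, 'matchedDN': '', 'serverSaslCreds': None}, d)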
241407f63bcda57499bf58e3b755df8052ecb3aa
add old nb urls and witch not found to 404
src/front/views/nb_urls.py
src/front/views/nb_urls.py
from django.core.urlresolvers import reverse, reverse_lazy from django.http import HttpResponsePermanentRedirect, HttpResponse from django.views.generic import View from events.models import Event from groups.models import SupportGroup class NBUrlsView(View): nb_paths = { '/unsubscribe': reverse_lazy('unsubscribe'), '/inscription': reverse_lazy('subscription_overseas'), '/login': reverse_lazy('oauth_redirect_view'), '/users/event_pages/new?parent_id=103': reverse_lazy('create_event'), '/users/event_pages/new?parent_id=73': reverse_lazy('create_group'), '/users/event_pages/new?parent_id=38840': reverse_lazy('create_event'), '/agir': reverse_lazy('volunteer'), '/inscription_detail': reverse_lazy('change_profile'), '/projet': 'https://avenirencommun.fr/avenir-en-commun/', '/le_projet': 'https://avenirencommun.fr/avenir-en-commun/', '/livrets_thematiques': 'https://avenirencommun.fr/livrets-thematiques/', '/convention': 'https://convention.jlm2017.fr/', '/commander_du_materiel': 'https://materiel.lafranceinsoumise.fr/', '/materiel': 'https://materiel.lafranceinsoumise.fr/', '/actualites': 'https://lafranceinsoumise.fr/actualites/', '/le_blog': 'http://melenchon.fr/', '/donner': 'https://dons.lafranceinsoumise.fr/', '/groupes_appui': 'https://lafranceinsoumise.fr/carte', '/groupes_d_appui': 'https://lafranceinsoumise.fr/carte', '/groupes_appui_redirige': 'https://lafranceinsoumise.fr/carte', '/evenements_locaux_redirige': 'https://lafranceinsoumise.fr/carte', '/evenements_locaux': 'https://lafranceinsoumise.fr/carte', '/les_groupes_d_appui': 'https://lafranceinsoumise.fr/carte', '/creer_ou_rejoindre_un_groupe_d_appui': 'https://lafranceinsoumise.fr/carte', '/actualites-groupes-appui': 'https://lafranceinsoumise.fr/category/actualites-groupes-appui/', '/groupes_proches': 'https://lafranceinsoumise.fr/groupes-appui/carte-groupes-dappui/', '/evenements_proches': 'https://lafranceinsoumise.fr/groupes-appui/carte-groupes-dappui/', '/caravanes_liste': 'https://lafranceinsoumise.fr/groupes-appui/les-casserolades/', '/carte': 'https://lafranceinsoumise.fr/carte', '/merci': 'https://lafranceinsoumise.fr/merci', '/18_mrs': 'https://18mars2017.fr/', '/universites_populaires': 'https://avenirencommun.fr/univpop_programme/', } def get(self, request, nb_path): event = Event.objects.filter(nb_path=nb_path, published=True).first() if event: return HttpResponsePermanentRedirect(reverse('view_event', args=[event.id])) group = SupportGroup.objects.filter(nb_path=nb_path, published=True).first() if group: return HttpResponsePermanentRedirect(reverse('view_group', args=[group.id])) try: nb_url = nb_path if request.META['QUERY_STRING']: nb_url = nb_url + '?' + request.META['QUERY_STRING'] url = self.nb_paths[nb_url] return HttpResponsePermanentRedirect(url) except KeyError: pass return HttpResponse(status=503)
Python
0.000001
@@ -2499,24 +2499,107 @@ rogramme/',%0A + '/agenda_melenchon': 'https://agir.lafranceinsoumise.fr/agenda/melenchon/'%0A %7D%0A%0A d @@ -3181,34 +3181,224 @@ -url = self.nb_paths%5Bnb_url + try:%0A url = self.nb_paths%5Bnb_url%5D%0A return HttpResponsePermanentRedirect(url)%0A except KeyError:%0A pass%0A url = self.nb_paths%5Bnb_path %5D%0A @@ -3531,9 +3531,9 @@ tus= -503 +404 )%0A
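Behaviourally the patch means: try the path together with its query string, fall back to the bare path, and answer 404 (not 503) when neither is known. The lookup logic reduced to plain Python, with 301 standing in for HttpResponsePermanentRedirect; function and variable names here are illustrative:

def resolve(nb_paths, nb_path, query_string=''):
    if query_string:
        try:
            return 301, nb_paths[nb_path + '?' + query_string]
        except KeyError:
            pass
    try:
        return 301, nb_paths[nb_path]
    except KeyError:
        return 404, None

paths = {'/merci': 'https://lafranceinsoumise.fr/merci'}
print(resolve(paths, '/merci', 'utm=x'))   # falls back to the bare path
print(resolve(paths, '/unknown'))          # (404, None)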
324bc231a65263ea1a7aaf16b027ca3f5179cd03
Fix nop filters for request documents
src/ggrc/models/request.py
src/ggrc/models/request.py
# Copyright (C) 2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """A module for Request object""" # pylint: disable=fixme from sqlalchemy import orm from ggrc import db from ggrc.models import audit from ggrc.models import reflection from ggrc.models import relationship from ggrc.models.comment import Commentable from ggrc.models.mixins import Base from ggrc.models.mixins import CustomAttributable from ggrc.models.mixins import Described from ggrc.models.mixins import FinishedDate from ggrc.models.mixins import Slugged from ggrc.models.mixins import Titled from ggrc.models.mixins import VerifiedDate from ggrc.models.mixins import statusable from ggrc.models.mixins.with_similarity_score import WithSimilarityScore from ggrc.models.mixins.autostatuschangeable import AutoStatusChangeable from ggrc.models.deferred import deferred from ggrc.models.mixins.assignable import Assignable from ggrc.models.object_document import Documentable from ggrc.models.object_person import Personable from ggrc.utils import similarity_options as similarity_options_module class Request(statusable.Statusable, AutoStatusChangeable, Assignable, Documentable, Personable, CustomAttributable, relationship.Relatable, WithSimilarityScore, Titled, Slugged, Described, Commentable, FinishedDate, VerifiedDate, Base, db.Model): """Class representing Requests. Request is an object representing a request from a Requester to Assignee to provide feedback, evidence or attachment in the form of comments, documents or URLs that (if specified) Verifier has to approve of before Request is considered finished. """ __tablename__ = 'requests' _title_uniqueness = False VALID_TYPES = (u'documentation', u'interview') ASSIGNEE_TYPES = (u'Assignee', u'Requester', u'Verifier') similarity_options = similarity_options_module.REQUEST # TODO Remove requestor and requestor_id on database cleanup requestor_id = db.Column(db.Integer, db.ForeignKey('people.id')) requestor = db.relationship('Person', foreign_keys=[requestor_id]) # TODO Remove request_type on database cleanup request_type = deferred(db.Column(db.Enum(*VALID_TYPES), nullable=False), 'Request') start_date = deferred(db.Column(db.Date, nullable=False), 'Request') end_date = deferred(db.Column(db.Date, nullable=False), 'Request') # TODO Remove audit_id audit_object_id on database cleanup audit_id = db.Column(db.Integer, db.ForeignKey('audits.id'), nullable=False) audit_object_id = db.Column(db.Integer, db.ForeignKey('audit_objects.id'), nullable=True) gdrive_upload_path = deferred(db.Column(db.String, nullable=True), 'Request') # TODO Remove test and notes columns on database cleanup test = deferred(db.Column(db.Text, nullable=True), 'Request') notes = deferred(db.Column(db.Text, nullable=True), 'Request') _publish_attrs = [ 'requestor', 'request_type', 'gdrive_upload_path', 'start_date', 'end_date', 'status', 'audit', 'test', 'notes', 'title', 'description' ] _tracked_attrs = ((set(_publish_attrs) | {'slug'}) - {'status'}) _sanitize_html = [ 'gdrive_upload_path', 'test', 'notes', 'description', 'title' ] _aliases = { "request_audit": { "display_name": "Audit", "filter_by": "_filter_by_request_audit", "mandatory": True, }, "end_date": "Due On", "notes": "Notes", "request_type": "Request Type", "start_date": "Starts On", "status": { "display_name": "Status", "handler_key": "request_status", }, "test": "Test", "related_assignees": { "display_name": "Assignee", "mandatory": True, "filter_by": "_filter_by_related_assignees", "type": 
reflection.AttributeInfo.Type.MAPPING, }, "related_requesters": { "display_name": "Requester", "mandatory": True, "filter_by": "_filter_by_related_requesters", "type": reflection.AttributeInfo.Type.MAPPING, }, "related_verifiers": { "display_name": "Verifier", "filter_by": "_filter_by_related_verifiers", "type": reflection.AttributeInfo.Type.MAPPING, }, "request_url": { "display_name": "Url", "filter_by": "_nop", "type": reflection.AttributeInfo.Type.SPECIAL_MAPPING, }, "request_evidence": { "display_name": "Evidence", "filter_by": "_nop", "type": reflection.AttributeInfo.Type.SPECIAL_MAPPING, }, } def _display_name(self): # pylint: disable=unsubscriptable-object if len(self.title) > 32: display_string = self.description[:32] + u'...' elif self.title: display_string = self.title elif len(self.description) > 32: display_string = self.description[:32] + u'...' else: display_string = self.description return u'Request with id {0} "{1}" for Audit "{2}"'.format( self.id, display_string, self.audit.display_name ) @classmethod def eager_query(cls): query = super(Request, cls).eager_query() return query.options( orm.joinedload('audit')) @classmethod def _filter_by_related_assignees(cls, predicate): return cls._get_relate_filter(predicate, "Assignee") @classmethod def _filter_by_related_requesters(cls, predicate): return cls._get_relate_filter(predicate, "Requester") @classmethod def _filter_by_related_verifiers(cls, predicate): return cls._get_relate_filter(predicate, "Verifier") @classmethod def _filter_by_request_audit(cls, predicate): return cls.query.filter( (audit.Audit.id == cls.audit_id) & (predicate(audit.Audit.slug) | predicate(audit.Audit.title)) ).exists() @classmethod def default_request_type(cls): return cls.VALID_TYPES[0]
Python
0
@@ -4550,35 +4550,45 @@ %22filter_by%22: %22_ -nop +filter_by_url %22,%0A %22ty @@ -4743,11 +4743,26 @@ : %22_ -nop +filter_by_evidence %22,%0A @@ -5852,24 +5852,169 @@ Verifier%22)%0A%0A + @classmethod%0A def _filter_by_url(cls, predicate):%0A return None%0A%0A @classmethod%0A def _filter_by_evidence(cls, predicate):%0A return None%0A%0A @classmeth
6ff7389f85485b8aa2848aa0e7420569c0c06f37
Update pluginLoader.
src/gopher/agent/plugin.py
src/gopher/agent/plugin.py
# # Copyright (c) 2010 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import os import sys from logging import getLogger log = getLogger(__name__) class PluginLoader: """ Agent plugins loader. """ ROOT = '/var/lib/gopher' PLUGINS = 'gopherplugins' @classmethod def abspath(cls): return os.path.join(cls.ROOT, cls.PLUGINS) def __init__(self): path = self.abspath() if os.path.exists(path): return os.makedirs(path) pkg = os.path.join(path, '__init__.py') f = open(pkg, 'w') f.close() def load(self): """ Load the plugins. """ sys.path.append(self.ROOT) path = self.abspath() for fn in os.listdir(path): if fn.startswith('__'): continue if not fn.endswith('.py'): continue self.__import(fn) def __import(self, fn): """ Import a module by file name. @param fn: The module file name. @type fn: str """ mod = fn.rsplit('.', 1)[0] imp = '%s.%s' % (self.PLUGINS, mod) try: __import__(imp) log.info('plugin "%s", imported', imp) except: log.error('plugin "%s", import failed', imp, exc_info=True)
Python
0
@@ -788,22 +788,16 @@ GINS = ' -gopher plugins' @@ -954,16 +954,20 @@ if +not os.path. @@ -988,23 +988,8 @@ - return%0A @@ -1018,19 +1018,18 @@ -pkg +fn = os.pa @@ -1078,11 +1078,10 @@ pen( -pkg +fn , 'w
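After the patch the loader's package directory is just 'plugins', and the directory plus its empty __init__.py are created only when missing instead of returning early whenever the path exists. The same idea as a small standalone helper (the helper name is made up; the default root mirrors the record's ROOT):

import os

def ensure_plugin_package(root='/var/lib/gopher', package='plugins'):
    # Create <root>/<package>/ with an empty __init__.py the first time only.
    path = os.path.join(root, package)
    if not os.path.exists(path):
        os.makedirs(path)
        with open(os.path.join(path, '__init__.py'), 'w'):
            pass
    return path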
b38497b06d52f230f9688e59a107349b4867810e
fix -- output to stdout, not to stderr as by default..
src/helpers/main_helper.py
src/helpers/main_helper.py
import logging import os from synthesis.smt_logic import Logic from synthesis.solvers import Z3_Smt_NonInteractive_ViaFiles, Z3_Smt_Interactive from third_party.ansistrm import ColorizingStreamHandler from translation2uct.ltl2automaton import Ltl2UCW def get_root_dir() -> str: #make paths independent of current working directory rel_path = str(os.path.relpath(__file__)) bosy_dir_toks = ['./'] + rel_path.split(os.sep) # abspath returns 'windows' (not cygwin) path root_dir = ('/'.join(bosy_dir_toks[:-1]) + '/../../') # root dir is two levels up compared to helpers/. return root_dir def setup_logging(verbose): level = None if verbose is 0: level = logging.INFO elif verbose >= 1: level = logging.DEBUG handler = ColorizingStreamHandler() handler.setFormatter(logging.Formatter(fmt="%(asctime)-10s%(message)s", datefmt="%H:%M:%S")) root = logging.getLogger() root.addHandler(handler) root.setLevel(level) return logging.getLogger(__name__) class Z3SolverFactory: def __init__(self, smt_tmp_files_prefix, z3_path, logic, logger, is_incremental:bool): self.smt_tmp_files_prefix = smt_tmp_files_prefix self.z3_path = z3_path self.logic = logic self.logger = logger self.is_incremental = is_incremental def create(self, seed=''): if self.is_incremental: solver = Z3_Smt_Interactive(self.logic, self.z3_path, self.logger) else: solver = Z3_Smt_NonInteractive_ViaFiles(self.smt_tmp_files_prefix+seed, self.z3_path, self.logic, self.logger) return solver def create_spec_converter_z3(logger:logging.Logger, logic:Logic, is_incremental:bool, smt_tmp_files_prefix:str=None): """ Return ltl to automaton converter, Z3 solver """ assert smt_tmp_files_prefix or is_incremental from config import z3_path, ltl3ba_path converter = Ltl2UCW(ltl3ba_path) solver_factory = Z3SolverFactory(smt_tmp_files_prefix, z3_path, logic, logger, is_incremental) return converter, solver_factory def remove_files_prefixed(file_prefix:str): """ Remove files from the current directory prefixed with a given prefix """ for f in os.listdir(): if f.startswith(file_prefix): os.remove(f)
Python
0
@@ -18,16 +18,27 @@ port os%0A +import sys%0A from syn @@ -905,16 +905,48 @@ %25M:%25S%22)) +%0A handler.stream = sys.stdout %0A%0A ro
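The fix is just pointing the log handler's stream at stdout, since stream handlers write to stderr by default. The same setup with the stdlib StreamHandler in place of the project's ColorizingStreamHandler, so the snippet runs on its own:

import logging
import sys

handler = logging.StreamHandler()        # would write to sys.stderr by default
handler.stream = sys.stdout              # ...now it writes to stdout instead
handler.setFormatter(logging.Formatter(fmt="%(asctime)-10s%(message)s",
                                       datefmt="%H:%M:%S"))

root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)
root.info("this message goes to stdout")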
60f3c4e1bbd25d781cfba5993aac647d937c64c9
add BillSource to public interface
opencivicdata/models/__init__.py
opencivicdata/models/__init__.py
# flake8: NOQA
from .jurisdiction import Jurisdiction, JurisdictionSession
from .division import Division
from .people_orgs import (
    Organization, OrganizationIdentifier, OrganizationName,
    OrganizationContactDetail, OrganizationLink, OrganizationSource,
    Person, PersonIdentifier, PersonName, PersonContactDetail, PersonLink,
    PersonSource,
    Post, PostContactDetail, PostLinks,
    Membership, MembershipContactDetail, MembershipLink
)
from .bill import (Bill, BillSummary, BillTitle, BillName, RelatedBill,
                   BillSponsor, BillDocument, BillVersion, BillDocumentLink,
                   BillVersionLink)
Python
0
@@ -606,10 +606,22 @@ sionLink +, BillSource )%0A
479a6cfaacd34cb6cda02ebaeba057fde593341b
Set default headers value in parameters
openfisca_web_api/wsgihelpers.py
openfisca_web_api/wsgihelpers.py
# -*- coding: utf-8 -*- # OpenFisca -- A versatile microsimulation software # By: OpenFisca Team <[email protected]> # # Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team # https://github.com/openfisca # # This file is part of OpenFisca. # # OpenFisca is free software; you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # OpenFisca is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Decorators to wrap functions to make them WSGI applications. The main decorator :class:`wsgify` turns a function into a WSGI application. """ import collections import datetime import json from functools import update_wrapper import webob.dec import webob.exc def N_(message): return message errors_title = { 400: N_("Unable to Access"), 401: N_("Access Denied"), 403: N_("Access Denied"), 404: N_("Unable to Access"), } def wsgify(func, *args, **kwargs): result = webob.dec.wsgify(func, *args, **kwargs) update_wrapper(result, func) return result def handle_cross_origin_resource_sharing(ctx): # Cf http://www.w3.org/TR/cors/#resource-processing-model environ = ctx.req.environ headers = [] origin = environ.get('HTTP_ORIGIN') if origin is None: return headers if ctx.req.method == 'OPTIONS': method = environ.get('HTTP_ACCESS_CONTROL_REQUEST_METHOD') if method is None: return headers headers_name = environ.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS') or '' headers.append(('Access-Control-Allow-Credentials', 'true')) headers.append(('Access-Control-Allow-Origin', origin)) headers.append(('Access-Control-Max-Age', '3628800')) headers.append(('Access-Control-Allow-Methods', method)) headers.append(('Access-Control-Allow-Headers', headers_name)) raise webob.exc.status_map[204](headers = headers) # No Content headers.append(('Access-Control-Allow-Credentials', 'true')) headers.append(('Access-Control-Allow-Origin', origin)) headers.append(('Access-Control-Expose-Headers', 'WWW-Authenticate')) return headers def respond_json(ctx, data, code = None, headers = None, json_dumps_default = None, jsonp = None): """Return a JSON response. This function is optimized for JSON following `Google JSON Style Guide <http://google-styleguide.googlecode.com/svn/trunk/jsoncstyleguide.xml>`_, but will handle any JSON except for HTTP errors. """ if isinstance(data, collections.Mapping): # Remove null properties as recommended by Google JSON Style Guide. 
data = type(data)( (name, value) for name, value in data.iteritems() if value is not None ) error = data.get('error') if isinstance(error, collections.Mapping): error = data['error'] = type(error)( (name, value) for name, value in error.iteritems() if value is not None ) else: error = None if headers is None: headers = [] if jsonp: content_type = 'application/javascript; charset=utf-8' else: content_type = 'application/json; charset=utf-8' if error: code = code or error['code'] assert isinstance(code, int) response = webob.exc.status_map[code](headers = headers) response.content_type = content_type if code == 204: # No content return response if error.get('code') is None: error['code'] = code if error.get('message') is None: title = errors_title.get(code) title = ctx._(title) if title is not None else response.status error['message'] = title else: response = ctx.req.response response.content_type = content_type if code is not None: response.status = code response.headers.update(headers) # try: # text = json.dumps(data, encoding = 'utf-8', ensure_ascii = False, indent = 2) # except UnicodeDecodeError: # text = json.dumps(data, ensure_ascii = True, indent = 2) if json_dumps_default is None: text = json.dumps(data) else: text = json.dumps(data, default = json_dumps_default) text = unicode(text) if jsonp: text = u'{0}({1})'.format(jsonp, text) response.text = text return response def convert_date_to_json(obj): if isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() else: return json.JSONEncoder().default(obj)
Python
0.000001
@@ -2650,20 +2650,18 @@ aders = -None +%5B%5D , json_d @@ -3520,53 +3520,8 @@ one%0A - if headers is None:%0A headers = %5B%5D%0A
d52132ea28fad48b07263c8ad57a98e4690ec852
fix cranberry schema to have unsigned int for temp
Gateway_v2/decoder.py
Gateway_v2/decoder.py
from xbee import ZigBee import sys import serial import datetime import struct import collections import csv import os.path import psycopg2 class Decoder: def __init__(self): self.schemaDict = { 'ga_legacy': 'HHBI'+'B'+'H'*6+'H'*6+'IhH'+'H'*20, '0': 'HHIH', #Heartbeat schema '1': 'HHIHHIhHH', #Apple schema '2': 'HHIHHHhHI', #Cranberry schema '3': 'HHIHHIHHI' #Dragonfruit Schema } self.callbacks = [] """ Checks if the packet has a valid schema Returns a boolean and creates schema number variable """ def check_schema(self, data): self.schema_num = struct.unpack('<' + 'H', data[0:2])[0] for key in self.schemaDict: if str(self.schema_num) == key: return True return False """ Main Function """ def decode_data(self, data, timestamp): if self.check_schema(data): dataDict = self.sort_packet(data, timestamp) for callback in self.callbacks: callback(dataDict) else: print "Not A Valid Packet" """ Displays given data dictonary """ def print_dictionary(self, dataDict): for key, value in dataDict.iteritems(): print key + ": " + str(value) """ Write the decoded data to respective csv file """ def write_to_file(self, dataDict): fileExists = True if(self.schema_num == 0): fileName = 'heartbeat_data.csv' elif(self.schema_num == 1): fileName = 'apple_data.csv' elif(self.schema_num == 2): fileName = 'cranberry_data.csv' elif(self.schema_num == 3): fileName = 'dragonfruit_data.csv' if(os.path.isfile(fileName) == False): fileExists = False dataString = '' for key, value in dataDict.iteritems(): dataString += str(value) dataString += ',' dataString = dataString[:-1] dataString += '\n' with open(fileName, 'a') as csvfile: if(fileExists == False): headerString = "" for key, value in dataDict.iteritems(): headerString += str(key) + ',' headerString = headerString[:-1] headerString += '\n' csvfile.write(headerString) csvfile.write(dataString) """ Write decoded data to respective table in database """ def write_to_db(self, dataDict): #make connection to database, this can be added elsewhere so it will only be done once con = psycopg2.connect("dbname='control_tower' user='control_tower'") cur = con.cursor() if self.schema_num == 0: tableName = 'heartbeat' elif self.schema_num == 1: tableName = 'apple' elif self.schema_num == 2: tableName = 'cranberry' elif self.schema_num == 3: tableName = 'dragonfruit' else: print "Invalid packet schema" return #create a new empty row cur.execute("INSERT INTO %s (time_received) VALUES ('%s')" %(tableName, dataDict["time_received"])) #insert data into newly created row for key, value in dataDict.iteritems(): if key != 'time_received': sqlCommand = "UPDATE %s SET %s = %s WHERE time_received = '%s'" %(tableName, key, str(value), dataDict["time_received"]) cur.execute(sqlCommand) con.commit() def register_callback(self, callback): self.callbacks.append(callback) """ Sorts data into a specific schema dictionary """ def sort_packet(self, data, timestamp): fmt = '<' + self.schemaDict[str(self.schema_num)] dataDict = {} unpacked_data = struct.unpack(fmt,data) dataDict["time_received"] = str(timestamp) if self.schema_num == 1: #apple schema dataDict["schema"] = unpacked_data[0] dataDict["node_addr"] = unpacked_data[1] dataDict["uptime_ms"] = unpacked_data[2] dataDict["batt_mv"] = unpacked_data[3] dataDict["panel_mv"] = unpacked_data[4] dataDict["press_pa"] = unpacked_data[5] dataDict["temp_c"] = unpacked_data[6] dataDict["humidity_centi_pct"] = unpacked_data[7] dataDict["apogee_w_m2"] = unpacked_data[8] elif self.schema_num == 2: #cranberry schema dataDict["schema"] = unpacked_data[0] 
dataDict["node_addr"] = unpacked_data[1] dataDict["uptime_ms"] = unpacked_data[2] dataDict["batt_mv"] = unpacked_data[3] dataDict["panel_mv"] = unpacked_data[4] dataDict["apogee_w_m2"] = unpacked_data[5] dataDict["temp_cK"] = unpacked_data[6] dataDict["humidity_pct"] = unpacked_data[7] dataDict["press_pa"] = unpacked_data[8] elif self.schema_num == 3: #dragonfruit schema dataDict["schema"] = unpacked_data[0] dataDict["node_addr"] = unpacked_data[1] dataDict["uptime_ms"] = unpacked_data[2] dataDict["batt_mv"] = unpacked_data[3] dataDict["panel_mv"] = unpacked_data[4] dataDict["apogee_sp215"] = unpacked_data[5] dataDict["temp_cK"] = unpacked_data[6] dataDict["humidity_pct"] = unpacked_data[7] dataDict["press_pa"] = unpacked_data[8] elif self.schema_num == 0: #heartbeat schema dataDict["schema"] = unpacked_data[0] dataDict["node_addr"] = unpacked_data[1] dataDict["uptime_ms"] = unpacked_data[2] dataDict["batt_mv"] = unpacked_data[3] return collections.OrderedDict(sorted(dataDict.items()))
Python
0.000001
@@ -345,17 +345,17 @@ 'HHIHHH -h +H HI', #Cr
5232597d574f7089f592aac0a5f25efd1ff7763a
Update test_blt.py.
openrcv/test/formats/test_blt.py
openrcv/test/formats/test_blt.py
from textwrap import dedent from openrcv.formats.blt import BLTFileWriter from openrcv.models import BallotsResource, ContestInput from openrcv.utils import StringInfo from openrcv.utiltest.helpers import UnitCase class BLTFileWriterTest(UnitCase): def test(self): contest = ContestInput() contest.name = "Foo" contest.candidates = ['A', 'B', 'C'] contest.seat_count = 1 ballots = [ (2, (2, 1)), (1, (2, )), ] contest.ballots_resource = BallotsResource(ballots) stream_info = StringInfo() writer = BLTFileWriter(stream_info) writer.write_contest(contest) expected = dedent("""\ 3 1 2 2 1 0 1 2 0 0 "A" "B" "C" "Foo\" """) self.assertEqual(stream_info.value, expected)
Python
0
@@ -139,20 +139,22 @@ openrcv. -util +stream s import @@ -160,20 +160,24 @@ t String -Info +Resource %0Afrom op @@ -558,27 +558,24 @@ -stream_info +resource = Strin @@ -579,12 +579,16 @@ ring -Info +Resource ()%0A @@ -617,27 +617,24 @@ eWriter( -stream_info +resource )%0A @@ -841,25 +841,25 @@ ual( -stream_info.value +resource.contents , ex
ff63f077fe68ae18b409598a3860d0abbc7442e3
fix num_topics property
orangecontrib/text/topics/hdp.py
orangecontrib/text/topics/hdp.py
from gensim import models from .topics import GensimWrapper class HdpModel(models.HdpModel): def __init__(self, corpus, id2word, **kwargs): # disable fitting during initialization _update = self.update self.update = lambda x: x super().__init__(corpus, id2word, **kwargs) self.update = _update class HdpWrapper(GensimWrapper): name = 'Hdp Model' Model = HdpModel def __init__(self, **kwargs): self.kwargs = kwargs def reset_model(self, corpus): self.model = self.Model(corpus=corpus, id2word=corpus.ngrams_dictionary, **self.kwargs) @property def num_topics(self): return self.model.m_lambda.shape[0]
Python
0.000004
@@ -476,16 +476,42 @@ = kwargs +%0A self.model = None %0A%0A de @@ -751,9 +751,30 @@ shape%5B0%5D + if self.model else 0 %0A
3a247b72ba39bb2f49099905c435127aea424fe0
Remove unused variable
lib/backend_common/tests/conftest.py
lib/backend_common/tests/conftest.py
"""Configure a mock application to run queries against""" import pytest from flask_login import current_user from flask import jsonify from backend_common import create_app, auth, auth0, mocks from os.path import join, dirname FAKE_CLIENT_SECRETS = """ { "web": { "auth_uri": "https://auth.mozilla.auth0.com/authorize", "issuer": "https://auth.mozilla.auth0.com/", "client_id": "some-id-string", "client_secret": "my-super-secret", "redirect_uris": [ "https://signoff.shipit.mozilla.com/oidc_callback" ], "token_uri": "https://auth.mozilla.auth0.com/oauth/token", "token_introspection_uri": "https://test/oauth/token", "userinfo_uri": "https://auth.mozilla.auth0.com/userinfo" } } """ @pytest.fixture(scope='module') def app(): """ Build an app with an authenticated dummy api """ # Use unique auth instance config = { 'DEBUG': True, 'OIDC_CLIENT_SECRETS': join(dirname(__file__), 'client_secrets.json'), 'OIDC_RESOURCE_SERVER_ONLY': True } app = create_app('test', extensions=[auth, auth0], config=config) @app.route('/') def index(): return app.response_class('OK') @app.route('/test-auth-login') @auth.auth.require_login def logged_in(): data = { 'auth': True, 'user': current_user.get_id(), # permissions is a set, not serializable 'scopes': list(current_user.permissions), } return jsonify(data) @app.route('/test-auth-scopes') @auth.auth.require_scopes([ ['project/test/A', 'project/test/B'], ['project/test-admin/*'], ]) def scopes(): return app.response_class('Your scopes are ok.') @app.route('/test-auth0-userinfo') @auth0.accept_token() def auth0_token(): return app.response_class('OK') # Add fake swagger url, used by redirect app.api.swagger_url = '/' return app @pytest.yield_fixture(scope='module') def client(app): """ A Flask test client. """ with app.test_client() as client: with mocks.apply_mockups(): yield client
Python
0.000015
@@ -226,559 +226,8 @@ me%0A%0A -FAKE_CLIENT_SECRETS = %22%22%22%0A%7B%0A %22web%22: %7B%0A %22auth_uri%22: %22https://auth.mozilla.auth0.com/authorize%22,%0A %22issuer%22: %22https://auth.mozilla.auth0.com/%22,%0A %22client_id%22: %22some-id-string%22,%0A %22client_secret%22: %22my-super-secret%22,%0A %22redirect_uris%22: %5B%0A %22https://signoff.shipit.mozilla.com/oidc_callback%22%0A %5D,%0A %22token_uri%22: %22https://auth.mozilla.auth0.com/oauth/token%22,%0A %22token_introspection_uri%22: %22https://test/oauth/token%22,%0A %22userinfo_uri%22: %22https://auth.mozilla.auth0.com/userinfo%22%0A %7D%0A%7D%0A%22%22%22%0A%0A %0A@py
6cd9af9d1c2f6b7e366c4bcc0b7c7422d4f776be
Add device events hook to app engine app.
src/appengine/main.py
src/appengine/main.py
import json import logging import os import random import string import sys from google.appengine.api import users from google.appengine.ext import webapp from google.appengine.ext.webapp import util from google.appengine.ext.webapp.util import login_required from google.appengine.ext import db from google.appengine.ext.db import polymodel sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../third_party')) import flask from flask import request import accounts import model import pusher from common import creds def StaticDir(): return os.path.normpath(os.path.join(os.path.dirname(__file__), '../static')) app = flask.Flask('domics', static_folder=StaticDir()) app.debug = True @app.route('/') def Root(): return flask.send_from_directory(StaticDir(), 'index.html') @app.before_request def BeforeRequest(): user = users.get_current_user() if not user and request.endpoint not in {'/'}: return flaskredirect(users.create_login_url(request.endpoint)) logging.info(user) person = model.Person.get_or_insert(key_name=user.user_id()) def GetUser(): user = users.get_current_user() assert user is not None return model.Person.get_or_insert(key_name=user.user_id()) @app.route('/api/user', methods=['GET']) def GetUserRequest(): user = GetUser() return flask.jsonify(id=user.key().id_or_name(), **db.to_dict(user)) @app.route('/api/channel') def post(chan_name): event = json.loads(self.request.body) print event p = pusher.Pusher(app_id=creds.pusher_app_id, key=creds.pusher_key, secret=creds.pusher_secret) p[chan_name].trigger('event', event) @app.route('/api/device/<int:device_id>', methods=['POST']) def CreateUpdateDevice(device_id): body = json.loads(flask.request.data) device = model.Device.get_by_id(device_id) if not device: device = Devices.CreateDevice(body) else: device.update(body) device.put() @app.route('/api/device/<int:device_id>', methods=['GET']) def GetDevice(device_id): device = model.Device.get_by_id(device_id) if not device: flask.abort(404) return flask.jsonify(**db.to_dict(device)) @app.route('/api/device/<int:device_id>/event') def DeviceEvent(device_id): device = model.Device.get_by_id(device_id) if not device: flask.abort(404) event = json.loads(flask.request.data) device.Event(event)
Python
0
@@ -917,16 +917,8 @@ /'%7D: - %0A @@ -1001,17 +1001,16 @@ fo(user) - %0A perso @@ -1509,17 +1509,16 @@ _app_id, - %0A key @@ -1601,24 +1601,158 @@ ', event)%0A%0A%0A [email protected]('/api/device/events', methods=%5B'POST'%5D)%0Adef DeviceEvents():%0A body = json.loads(flask.request.data)%0A logging.info(body)%0A%0A%0A @app.route('
f10797b4b39c262fdb8a250386f7c61e7922005a
Check for CLI updates (from private pip by default)
src/azure/cli/main.py
src/azure/cli/main.py
import os import sys from ._argparse import ArgumentParser from ._logging import configure_logging, logger from ._session import Session from ._output import OutputProducer from azure.cli.extensions import event_dispatcher # CONFIG provides external configuration options CONFIG = Session() # SESSION provides read-write session variables SESSION = Session() def main(args, file=sys.stdout): #pylint: disable=redefined-builtin CONFIG.load(os.path.expanduser('~/az.json')) SESSION.load(os.path.expanduser('~/az.sess'), max_age=3600) configure_logging(args, CONFIG) from ._locale import install as locale_install locale_install(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'locale', CONFIG.get('locale', 'en-US'))) event_dispatcher.raise_event(event_dispatcher.REGISTER_GLOBAL_PARAMETERS, event_data={'args': args}) parser = ArgumentParser("az") import azure.cli.commands as commands # Find the first noun on the command line and only load commands from that # module to improve startup time. for a in args: if not a.startswith('-'): commands.add_to_parser(parser, a) break else: # No noun found, so load all commands. commands.add_to_parser(parser) try: cmd_result = parser.execute(args) # Commands can return a dictionary/list of results # If they do, we print the results. if cmd_result.result: formatter = OutputProducer.get_formatter(cmd_result.output_format) OutputProducer(formatter=formatter, file=file).out(cmd_result.result) except RuntimeError as ex: logger.error(ex.args[0]) return ex.args[1] if len(ex.args) >= 2 else -1 except KeyboardInterrupt: return -1
Python
0
@@ -14,16 +14,57 @@ port sys +%0Afrom datetime import datetime, timedelta %0A%0Afrom . @@ -260,16 +260,99 @@ atcher%0A%0A +from azure.cli.utils.update_checker import check_for_cli_update, UpdateCheckError%0A%0A # CONFIG @@ -481,16 +481,1525 @@ sion()%0A%0A +UPDATE_CHECK_DATE_FORMAT = %22%25Y-%25m-%25d %25H:%25M:%25S%22%0ADAYS_BETWEEN_UPDATE_CHECK = 7%0A%0Adef _should_check_for_update(config_key, now):%0A config_data = CONFIG.get(config_key)%0A if not config_data:%0A return True%0A last_checked = datetime.strptime(config_data%5B'last_checked'%5D, UPDATE_CHECK_DATE_FORMAT)%0A expiry = last_checked + timedelta(days=DAYS_BETWEEN_UPDATE_CHECK)%0A # prev check not expired yet and there wasn't an update available from our previous check%0A return False if expiry %3E= now and not config_data%5B'update_available'%5D else True%0A%0Adef _save_update_data(config_key, now, update_info):%0A CONFIG%5Bconfig_key%5D = %7B%0A 'last_checked': now.strftime(UPDATE_CHECK_DATE_FORMAT),%0A 'latest_version': str(update_info%5B'latest_version'%5D) if update_info%5B'latest_version'%5D else None,%0A 'current_version': str(update_info%5B'current_version'%5D) if update_info%5B'current_version'%5D else None,%0A 'update_available': update_info%5B'update_available'%5D,%0A %7D%0A%0Adef _check_for_cli_update():%0A config_key = 'update_check_cli'%0A now = datetime.now()%0A if not _should_check_for_update(config_key, now):%0A return%0A # TODO:: CREATE ENV VAR TO SET THIS TO TRUE%0A update_info = check_for_cli_update(private=True)%0A _save_update_data(config_key, now, update_info)%0A if update_info%5B'update_available'%5D:%0A print(%22Current version of CLI %7B%7D. Version %7B%7D is available. Update with %60az components update self%60%22.format(update_info%5B'current_version'%5D, update_info%5B'latest_version'%5D))%0A%0A def main @@ -3253,16 +3253,16 @@ format)%0A - @@ -3335,16 +3335,48 @@ result)%0A + _check_for_cli_update()%0A exce
f6be63484b083c07b09616c8e3dd2f916e0a4488
Change folder path of patches' tfrecords
src/btc_parameters.py
src/btc_parameters.py
# Brain Tumor Classification # Script for Hyper-Parameters # Author: Qixun Qu # Create on: 2017/10/14 # Modify on: 2017/10/28 # ,,, ,,, # ;" '; ;' ", # ; @.ss$$$$$$s.@ ; # `s$$$$$$$$$$$$$$$' # $$$$$$$$$$$$$$$$$$ # $$$$P""Y$$$Y""W$$$$$ # $$$$ p"$$$"q $$$$$ # $$$$ .$$$$$. $$$$' # $$$DaU$$O$$DaU$$$' # '$$$$'.^.'$$$$' # '&$$$$$&' ''' Hyper-parameters for training pipeline -1- Basic Settings: - train_path: string, the path of tfrecord for training - validate_path: string, the path of tfrecord for validating - train_num: int, the number of patches in training set - validate_num: int, the number of patches in validating set - classes_num: int, the number of grading groups - patch_shape: int list, each patch's shape - capacity: int, the maximum number of elements in the queue - min_after_dequeue: int, minimum number elements in the queue after a dequeue, used to ensure a level of mixing of elements -2- Parameters for Training: - batch_size: int, the number of patches in one batch - num_epoches: int, the number of epoches - learning_rate_first: float, the learning rate for first epoch - learning_rate_last: float, the learning rate for last epoch - more parameters to be added -3- Parameters for Constructing Model - activation: string, indicates the activation method by either "relu" or "lrelu" (leaky relu) for general cnn models - alpha: float, slope of the leaky relu at x < 0 - bn_momentum: float, momentum for removing average in batch normalization, typically values are 0.999, 0.99, 0.9, etc - drop_rate: float, rate of dropout of input units, which is between 0 and 1 ''' import os import json from btc_settings import * ''' Parameters for General CNN Models ''' # Set path of the folder in where tfrecords are save in parent_dir = os.path.dirname(os.getcwd()) tfrecords_dir = os.path.join(parent_dir, DATA_FOLDER, TFRECORDS_FOLDER) # Create paths for training and validating tfrecords tpath = os.path.join(tfrecords_dir, "partial_train.tfrecord") vpath = os.path.join(tfrecords_dir, "partial_validate.tfrecord") # Whole dataset # tpath = os.path.join(tfrecords_dir, "train.tfrecord") # vpath = os.path.join(tfrecords_dir, "validate.tfrecord") # Load dict from json file in which the number of # training and valdating set can be found json_path = os.path.join(TEMP_FOLDER, TFRECORDS_FOLDER, VOLUMES_NUM_FILE) with open(json_path) as json_file: volumes_num = json.load(json_file) train_num = 236 validate_num = 224 # train_num = volumes_num["train"] # validate_num = volumes_num["validate"] cnn_parameters = { # Basic settings "train_path": tpath, "validate_path": vpath, "train_num": train_num, "validate_num": validate_num, "classes_num": 3, "patch_shape": PATCH_SHAPE, "capacity": 350, "min_after_dequeue": 300, # Parameters for training "batch_size": 10, "num_epoches": 1, "learning_rate_first": 1e-3, "learning_rate_last": 1e-4, "l2_loss_coeff": 0.001, # Parameter for model's structure "activation": "relu", # "lrelu", "alpha": None, # "lrelu" "bn_momentum": 0.99, "drop_rate": 0.5 } ''' Parameters for Autoencoder Models ''' cae_parameters = { # Basic settings "train_path": tpath, "validate_path": vpath, "train_num": train_num, "validate_num": validate_num, "classes_num": 3, "patch_shape": PATCH_SHAPE, "capacity": 350, "min_after_dequeue": 300, # Parameters for training "batch_size": 10, "num_epoches": 1, "learning_rate_first": 1e-3, "learning_rate_last": 1e-4, "l2_loss_coeff": 0.001, "sparse_penalty_coeff": 0.001, "sparse_level": 0.05, # Parameter for model's structure "activation": "relu", "bn_momentum": 0.99, 
"drop_rate": 0.5 }
Python
0
@@ -2018,33 +2018,78 @@ DATA_FOLDER, - TFRECORD +%0A TFRECORDS_FOLDER, PATCHE S_FOLDER)%0A%0A# @@ -2547,16 +2547,57 @@ _FOLDER, +%0A PATCHES_FOLDER, VOLUMES
8ce52602c0804d7888bcf50ce85050e085e0797d
Raise environment.BuildNotFound if we can't patch up to the desired build
environment/__init__.py
environment/__init__.py
""" WoW-style MPQ-based environments """ import os import re import mpq class BuildNotFound(Exception): pass class Base(object): def __init__(self, rawPath, build=None): self.rawPath = rawPath if build is not None: self.setBuild(build) def __repr__(self): if hasattr(self, "_path"): return "Base(%r)" % (self.path()) return "Base(%r)" % (self.rawPath) def build(self): return int(re.match(r"^(\d+).direct$", os.path.basename(self.path())).groups()[0]) def builds(self): """ Returns a dict of base paths for build numbers in {build: path} format """ sre = re.compile(r"^(\d+).direct$") ret = {} for f in os.listdir(self.rawPath): # Here we parse each <build>.direct/ match = sre.match(os.path.basename(f)) if match: ret[int(match.groups()[0])] = f return ret def dataDir(self): return os.path.join(self.path(), "Data") def localeDir(self, locale): return os.path.join(self.dataDir(), locale) def path(self): if not hasattr(self, "_path"): raise RuntimeError("Cannot access Base.path() if Base does not have a build") return os.path.join(self.rawPath, self._path) def patchFiles(self, locale="enUS"): """ Returns a dict of build: patch MPQs. """ files = {} # Old-style wow-updates (oldest) first path = self.dataDir() sre = re.compile(r"^wow-update-(\d+).MPQ$") for f in os.listdir(path): match = sre.match(os.path.basename(f)) if match: fileBuild = int(match.groups()[0]) files[fileBuild] = [os.path.join(path, f)] # Special cases: # wow-update*-13623 has both old-style and new-style patches. # The new style ones are corrupt. We'll just assume that if # we have both old-style and new-style, old-style takes priority. path = self.localeDir(locale) sre = re.compile(r"^wow-update-%s-(\d+).MPQ$" % (locale)) for f in os.listdir(path): match = sre.match(os.path.basename(f)) if match: fileBuild = int(match.groups()[0]) if fileBuild not in files: files[fileBuild] = [os.path.join(path, f)] return files def setBuild(self, build): highestMatch = 0 bases = self.builds() for baseBuild in bases: # We want the highest possible match: # - filter out anything higher than the requested build # - filter out anything lower than our highest match if baseBuild <= build and baseBuild > highestMatch: highestMatch = baseBuild if not highestMatch: raise BuildNotFound(build) self._path = bases[highestMatch] def defaultBase(): # Try $MPQ_BASE_DIR, otherwise use ~/mpq/WoW return Base(os.environ.get("MPQ_BASE_DIR", os.path.join(os.path.expanduser("~"), "mpq", "WoW"))) def highestBase(): base = defaultBase() bases = base.builds() base.setBuild(sorted(bases.keys())[-1]) return base def highestBuild(): return sorted(highestBase().patchFiles().keys())[-1] class Environment(object): def __init__(self, build, locale="enUS", base=defaultBase()): base.setBuild(build) self.base = base self.build = build self.locale = locale self.path = os.path.join(self.base.localeDir(locale), "locale-%s.MPQ" % (locale)) self.mpq = mpq.MPQFile(self.path) if build != base.build(): for patch in self.patchList(): self.mpq.patch(patch) self._cache = {} def __repr__(self): return "Environment(build=%r, locale=%r, base=%r)" % (self.build, self.locale, self.base) def _dbFileName(self, name): # In order to avoid duplicates, we need to standardize the filename name = name.lower() base, ext = os.path.splitext(name) if ext: # Check against valid extensions if ext not in (".dbc", ".wdb", ".db2", ".dba"): raise ValueError("%r is not a known DBFile format" % (ext)) else: # No extension, we need to guess it if name in ("item", "item-sparse"): name += ".db2" elif 
name.endswith("cache"): name += ".wdb" else: name += ".dbc" return name def _dbFileOpen(self, file): from ..wdbc.db2 import DB2File from ..wdbc.dbc import DBCFile from ..wdbc.structures import getstructure from ..wdbc.utils import getfilename handle = self.open(file) name = getfilename(file) structure = getstructure(name) if name in ("item", "item-sparse"): cls = DB2File else: cls = DBCFile return cls.open(handle, build=self.build, structure=structure, environment=self) def hasDbFile(self, name): name = self._dbFileName(name) if name in self._cache: return True return "DBFilesClient/%s" % (name) in self.mpq def dbFile(self, name): name = self._dbFileName(name) if name not in self._cache: if name.endswith(".wdb"): raise NotImplementedError("Cache files are not supported in environments") self._cache[name] = self._dbFileOpen("DBFilesClient/%s" % (name)) return self._cache[name] def open(self, file): return self.mpq.open(file) def patchList(self): patches = self.base.patchFiles() builds = sorted(patches.keys()) ret = [] for build in builds: if build > self.build: # We only want the patches that correspond to the environment's build break for f in patches[build]: ret.append(f) return ret
Python
0
@@ -4895,16 +4895,230 @@ et = %5B%5D%0A +%0A%09%09# Raise BuildNotFound if we can't patch up to the desired build%0A%09%09# We should raise it in __init__ instead, but it would involve duplicate code%0A%09%09if self.build not in builds:%0A%09%09%09raise BuildNotFound(self.build)%0A%0A %09%09for bu
66608b724112680075d6e41edda7e631da69301e
add stop_id test
ott/otp_client/tests/tests_ti.py
ott/otp_client/tests/tests_ti.py
import os import unittest from ott.otp_client.transit_index.routes import Routes from ott.otp_client.transit_index.stops import Stops def get_db(): from gtfsdb import api, util from ott.utils import file_utils dir = file_utils.get_module_dir(Routes) gtfs_file = os.path.join(dir, '..', 'tests', 'data', 'gtfs', 'multi-date-feed.zip') gtfs_file = gtfs_file.replace('c:\\', '/').replace('\\', '/') gtfs_file = "file://{0}".format(gtfs_file) gtfs_file = gtfs_file.replace('\\', '/') url = util.make_temp_sqlite_db_uri('curr') db = api.database_load(gtfs_file, url=url, current_tables=True) return db class TiTest(unittest.TestCase): db = None def setUp(self): if TiTest.db is None: self.db = get_db() TiTest.db = self.db def test_route(self): # test current routes route = Routes.route_factory(self.db.session, 'NEW') self.assertTrue(route.get('id') == 'DTA:NEW') route = Routes.route_factory(self.db.session, 'OLD') self.assertTrue(route.get('id') == 'DTA:OLD') route = Routes.route_factory(self.db.session, 'ALWAYS') self.assertTrue(route.get('id') == 'DTA:ALWAYS') def test_routes_list(self): # test current routes routes = Routes.route_list_factory(self.db.session) self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) # test query of routes via date (slower) routes = Routes.route_list_factory(self.db.session, date="9-15-2018") self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) def test_stop_routes(self): # test current stop querying it's route list routes = Routes.stop_routes_factory(self.db.session, 'DADAN') self.assertTrue(len(routes) == 2) self.assertTrue(routes[0].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) self.assertTrue(routes[1].get('id') in ('DTA:NEW', 'DTA:ALWAYS')) # test an old stop querying it's route list (assigned to DTA:OLD route) routes = Routes.stop_routes_factory(self.db.session, 'EMSI', date="9-15-2018") self.assertTrue(routes[0].get('id') == 'DTA:OLD') # test an old stop querying it's route list (assigned to DTA:ALWAYS route, but don't show due to OLD stop) routes = Routes.stop_routes_factory(self.db.session, 'OLD', date="9-15-2018") self.assertTrue(len(routes) == 1) self.assertTrue(routes[0].get('id') in ('DTA:OLD', 'DTA:ALWAYS')) # test querying old stop, but with current date ... so no results, since stop is not in CurrentStops routes = Routes.stop_routes_factory(self.db.session, 'OLD') self.assertTrue(len(routes) == 0) def test_bbox_stops(self): # TODO !!! pass # stops = Stops.stop() #import pdb; pdb.set_trace() #for r in routes: print(r)
Python
0.000001
@@ -2967,88 +2967,285 @@ est_ -bbox_stops(self):%0A # TODO !!!%0A pass%0A # stops = Stops.stop() +stop_query(self):%0A stop = Stops.stop(self.db.session, 'NEW')%0A self.assertTrue(stop)%0A self.assertTrue(stop.routes == '50')%0A%0A stop = Stops.stop(self.db.session, 'OLD', def_val=None)%0A self.assertTrue(stop == None)%0A%0A def test_bbox_stops(self): %0A @@ -3250,16 +3250,17 @@ # + import p @@ -3291,31 +3291,56 @@ -#for r in routes: print(r) +pass%0A%0A def test_point_stops(self):%0A pass%0A %0A
a8145f318b4659802d5f9e244354f23b5ed32fb3
fix import error
equinox/nn/attention.py
equinox/nn/attention.py
from typing import Optional import jax import jax.numpy as jnp import jax.random as jrandom from ..custom_types import Array from ..module import Module, static_field from .linear import Dropout, Linear class MultiheadAttention(Module): """Multihead Attention layer from 'Attention Is All You Need' (https://arxiv.org/abs/1706.03762)""" embed_dim: int = static_field() num_heads: int = static_field() kdim: int = static_field() vdim: int = static_field() _qkv_same_embed_dim: bool = static_field() head_dim: int = static_field() q_proj: Linear k_proj: Linear v_proj: Linear out_proj: Linear dropout: Dropout def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, use_bias: bool = True, kdim: Optional[int] = None, vdim: Optional[int] = None, add_bias_kv: bool = False, *, key: "jax.random.PRNGKey", ): """**Arguments:** - `embed_dim`: Dimension of the model. - `num_heads`: Number of parallel attention heads. - `dropout`: Dropout probability on attention matrix. Default: `0.0`. - `use_bias`: Whether to use a bias term on the output projection. Default: `True`. - `kdim`: Total number of features for keys. Default: `None` (use `kdim=embed_dim`). - `vdim`: Total number of features for values. Default: `None` (use `vdim=embed_dim`). - `add_bias_kv`: Whether to use bias term for value and key projections. Default: `False`. - `key`: A `jax.random.PRNGKey` used to provide randomness for parameter initialisation. (Keyword only argument.) """ super().__init__() key1, key2, key3, key4 = jrandom.split(key, 4) self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if kdim is not None else embed_dim self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" if dropout == 0.0: self.dropout = Dropout(dropout, deterministic=True) else: self.dropout = Dropout(dropout) self.q_proj = Linear( self.embed_dim, self.num_heads * self.embed_dim, use_bias=False, key=key1 ) self.k_proj = Linear( self.kdim, self.num_heads * embed_dim, use_bias=add_bias_kv, key=key2 ) self.v_proj = Linear( self.vdim, self.num_heads * embed_dim, use_bias=add_bias_kv, key=key3 ) self.out_proj = Linear( embed_dim * num_heads, embed_dim, use_bias=use_bias, key=key4 ) def __call__( self, query: Array, key: Array, value: Array, attn_mask: Optional[Array] = None, *, key_: Optional["jax.random.PRNGKey"] = None, ) -> Array: """**Arguments:** - `query`: Query embedding. Should be a JAX array of shape `(sequence_lenth, embed_dim)`. - `key`: Key embedding. Should be a JAX array of shape `(sequence_lenth, embed_dim)`. - `value`: Value embedding. Should be a JAX array of shape `(sequence_lenth, embed_dim)`. - `attn_mask`: A mask preventing attention to certain positions. - `key_`: A PRNGKey used for dropout. **Returns:** A JAX array of shape `(sequence_lenth, embed_dim)`. 
""" d1, d2 = query.shape query_heads = jax.vmap(self.q_proj)(query).reshape( self.embed_dim, self.num_heads, d1 ) key_heads = jax.vmap(self.k_proj)(key).reshape(self.kdim, self.num_heads, d1) value_heads = jax.vmap(self.v_proj)(value).reshape( self.vdim, self.num_heads, d1 ) attn_logits = jnp.einsum("...dhs,...dhS->...hsS", query_heads, key_heads) sqrt_key_size = jnp.sqrt(self.kdim).astype(key.dtype) attn_logits = attn_logits / sqrt_key_size attn_logits = self.dropout(attn_logits, key=key_) if attn_mask is not None: if attn_mask.ndim != attn_logits.ndim: raise ValueError( f"Mask dimensionality {attn_mask.ndim} must match logits " f"{attn_logits.ndim}." ) attn_logits = jnp.where(attn_mask, attn_logits, -1e30) attn_weights = jax.nn.softmax(attn_logits) attn = jnp.einsum("...hsS,...dhS->...hsd", attn_weights, value_heads) attn_vec = jnp.reshape(attn, (*query.shape[:-1], -1)) return jax.vmap(self.out_proj)(attn_vec)
Python
0.000001
@@ -172,14 +172,15 @@ om . -linear +dropout imp @@ -190,17 +190,36 @@ Dropout -, +%0Afrom .linear import Linear%0A
b9848aba428c1c7c99a1fef64ff56b940abb9eb9
Remove num option from fetch_recent tweets
ditto/twitter/management/commands/fetch_twitter_tweets.py
ditto/twitter/management/commands/fetch_twitter_tweets.py
# coding: utf-8 import argparse from django.core.management.base import BaseCommand, CommandError from ...fetch import FetchTweets class Command(BaseCommand): """fetches tweets from Twitter. Fetch recent tweets since the last fetch, from all accounts: ./manage.py fetch_twitter_tweets --recent Fetch 20 recent tweets, from all accounts: ./manage.py fetch_twitter_tweets --recent=20 Fetch recent tweets since the last fetch, from one account: ./manage.py fetch_twitter_tweets --recent --account=philgyford Fetch 30 most recent favorited tweets, from all accounts: ./manage.py fetch_twitter_tweets --favorites=30 Fetch 30 most recent favorited tweets by one account: ./manage.py fetch_twitter_tweets --favorites=30 --account=philgyford """ help = "Fetches recent and favorited tweets from Twitter" def add_arguments(self, parser): parser.add_argument( '--favorites', action='store', default=False, help='Fetch the most recent favorited tweets, e.g. "10".' ) parser.add_argument( '--recent', action='store_true', default=False, help='Fetch the most recent tweets, e.g. "10". Leave blank for all tweets since last fetch.' ) parser.add_argument( '--account', action='store', default=False, help='Only fetch for one Twitter account.', ) def handle(self, *args, **options): # We might be fetching for a specific account or all (None). account = options['account'] if options['account'] else None; if options['favorites']: results = FetchTweets().fetch_favorites( num=options['favorites'], screen_name=account) elif options['recent']: results = FetchTweets().fetch_recent(screen_name=account) elif options['account']: raise CommandError("Specify --recent or --favorites as well as --account.") else: raise CommandError("Specify --recent or --favorites, either with an optional number.") # results should be a list of dicts. # Each dict is for one account. # Each dict will look like either: # { 'account': 'thescreename', # 'success': True, # 'fetched': 200, # The number of tweets fetched # } # or: # { 'account': 'thescreename', # 'success': False, # 'message': 'There was an error fetching data because blah', #} for result in results: if result['success']: noun = 'tweet' if result['fetched'] == 1 else 'tweets' self.stdout.write('%s: Fetched %s %s' % ( result['account'], result['fetched'], noun)) else: self.stderr.write('%s: Failed to fetch tweets: %s' % ( result['account'], result['message']))
Python
0.000025
@@ -319,11 +319,8 @@ tch -20 rece @@ -398,11 +398,8 @@ cent -=20 %0A%0A @@ -2128,21 +2128,28 @@ ites -, either with +. Favorites can have an @@ -2164,16 +2164,34 @@ number. + eg --favorites=20 %22)%0A%0A
1cf3003518ceceefcc7d518ca4afbc5571bc18e7
Use get_or_add_addon
website/addons/dataverse/views/config.py
website/addons/dataverse/views/config.py
# -*- coding: utf-8 -*- import httplib as http from flask import request from modularodm import Q from modularodm.storage.base import KeyExistsException from framework.exceptions import HTTPError from framework.auth.decorators import must_be_logged_in from website.project import decorators from website.util.sanitize import assert_clean from website.addons.dataverse import client from website.addons.dataverse.provider import DataverseProvider from website.addons.dataverse.serializer import DataverseSerializer from website.oauth.models import ExternalAccount @must_be_logged_in def dataverse_get_user_accounts(auth): """ Returns the list of all of the current user's authorized Dataverse accounts """ return DataverseSerializer( user_settings=auth.user.get_addon('dataverse') ).serialized_user_settings @must_be_logged_in def dataverse_add_user_account(auth, **kwargs): """Verifies new external account credentials and adds to user's list""" user = auth.user provider = DataverseProvider() host = request.json.get('host').rstrip('/') api_token = request.json.get('api_token') # Verify that credentials are valid client.connect_or_401(host, api_token) # Note: `DataverseSerializer` expects display_name to be a URL try: provider.account = ExternalAccount( provider=provider.short_name, provider_name=provider.name, display_name=host, # no username; show host oauth_key=host, # hijacked; now host oauth_secret=api_token, # hijacked; now api_token provider_id=api_token, # Change to username if Dataverse allows ) provider.account.save() except KeyExistsException: # ... or get the old one provider.account = ExternalAccount.find_one( Q('provider', 'eq', provider.short_name) & Q('provider_id', 'eq', api_token) ) assert provider.account is not None if provider.account not in user.external_accounts: user.external_accounts.append(provider.account) user.save() user_settings = user.get_addon('dataverse') if not user_settings: user.add_addon(addon_name='dataverse', auth=auth) user.save() return {} @must_be_logged_in @decorators.must_be_valid_project @decorators.must_have_addon('dataverse', 'node') def dataverse_get_config(node_addon, auth, **kwargs): """API that returns the serialized node settings.""" result = DataverseSerializer( user_settings=auth.user.get_addon('dataverse'), node_settings=node_addon, ).serialized_node_settings return {'result': result}, http.OK @decorators.must_have_permission('write') @decorators.must_have_addon('dataverse', 'user') @decorators.must_have_addon('dataverse', 'node') def dataverse_get_datasets(node_addon, **kwargs): """Get list of datasets from provided Dataverse alias""" alias = request.json.get('alias') connection = client.connect_from_settings(node_addon) dataverse = client.get_dataverse(connection, alias) datasets = client.get_datasets(dataverse) ret = { 'alias': alias, # include alias to verify dataset container 'datasets': [{'title': dataset.title, 'doi': dataset.doi} for dataset in datasets], } return ret, http.OK @decorators.must_have_permission('write') @decorators.must_have_addon('dataverse', 'user') @decorators.must_have_addon('dataverse', 'node') def dataverse_set_config(node_addon, auth, **kwargs): """Saves selected Dataverse and dataset to node settings""" user_settings = node_addon.user_settings user = auth.user if user_settings and user_settings.owner != user: raise HTTPError(http.FORBIDDEN) try: assert_clean(request.json) except AssertionError: # TODO: Test me! 
raise HTTPError(http.NOT_ACCEPTABLE) alias = request.json.get('dataverse').get('alias') doi = request.json.get('dataset').get('doi') if doi is None: return HTTPError(http.BAD_REQUEST) connection = client.connect_from_settings(node_addon) dataverse = client.get_dataverse(connection, alias) dataset = client.get_dataset(dataverse, doi) node_addon.dataverse_alias = dataverse.alias node_addon.dataverse = dataverse.title node_addon.dataset_doi = dataset.doi node_addon.dataset_id = dataset.id node_addon.dataset = dataset.title node = node_addon.owner node.add_log( action='dataverse_dataset_linked', params={ 'project': node.parent_id, 'node': node._primary_key, 'dataset': dataset.title, }, auth=auth, ) node_addon.save() return {'dataverse': dataverse.title, 'dataset': dataset.title}, http.OK
Python
0.000017
@@ -2126,91 +2126,91 @@ -user_settings = user.get_addon('dataverse')%0A if not user_settings:%0A +# Need to ensure that the user has dataverse enabled at this point%0A - user. +get_or_ add_ @@ -2219,19 +2219,8 @@ don( -addon_name= 'dat @@ -2239,20 +2239,16 @@ h=auth)%0A - user
fda19eae8abd54dcfd5dd2d4e878a4e969528261
update FAIL
rasa/cli/utils.py
rasa/cli/utils.py
import os import sys from typing import Any, Callable, Dict, Optional, Text, List import logging from rasa.constants import DEFAULT_MODELS_PATH logger = logging.getLogger(__name__) def get_validated_path( current: Optional[Text], parameter: Text, default: Optional[Text] = None, none_is_valid: bool = False, ) -> Optional[Text]: """Check whether a file path or its default value is valid and returns it. Args: current: The parsed value. parameter: The name of the parameter. default: The default value of the parameter. none_is_valid: `True` if `None` is valid value for the path, else `False`` Returns: The current value if it was valid, else the default value of the argument if it is valid, else `None`. """ if current is None or current is not None and not os.path.exists(current): if default is not None and os.path.exists(default): reason_str = "'{}' not found.".format(current) if current is None: reason_str = "Parameter '{}' not set.".format(parameter) logger.debug( "{} Using default location '{}' instead.".format(reason_str, default) ) current = default elif none_is_valid: current = None else: cancel_cause_not_found(current, parameter, default) return current def missing_config_keys(path: Text, mandatory_keys: List[Text]) -> List: import rasa.utils.io if not os.path.exists(path): return mandatory_keys config_data = rasa.utils.io.read_yaml_file(path) return [k for k in mandatory_keys if k not in config_data or config_data[k] is None] def cancel_cause_not_found( current: Optional[Text], parameter: Text, default: Optional[Text] ) -> None: """Exits with an error because the given path was not valid. Args: current: The path given by the user. parameter: The name of the parameter. default: The default value of the parameter. """ default_clause = "" if default: default_clause = "use the default location ('{}') or ".format(default) print_error( "The path '{}' does not exist. Please make sure to {}specify it" " with '--{}'.".format(current, default_clause, parameter) ) exit(1) def parse_last_positional_argument_as_model_path() -> None: """Fixes the parsing of a potential positional model path argument.""" import sys if ( len(sys.argv) >= 2 and sys.argv[1] in ["run", "shell", "interactive"] and not sys.argv[-2].startswith("-") and os.path.exists(sys.argv[-1]) ): sys.argv.append(sys.argv[-1]) sys.argv[-2] = "--model" def create_output_path( output_path: Text = DEFAULT_MODELS_PATH, prefix: Text = "", fixed_name: Optional[Text] = None, ) -> Text: """Creates an output path which includes the current timestamp. Args: output_path: The path where the model should be stored. fixed_name: Name of the model. prefix: A prefix which should be included in the output path. Returns: The generated output path, e.g. "20191201-103002.tar.gz". """ import time if output_path.endswith("tar.gz"): return output_path else: if fixed_name: name = fixed_name else: time_format = "%Y%m%d-%H%M%S" name = time.strftime(time_format) file_name = "{}{}.tar.gz".format(prefix, name) return os.path.join(output_path, file_name) def minimal_kwargs( kwargs: Dict[Text, Any], func: Callable, excluded_keys: Optional[List] = None ) -> Dict[Text, Any]: """Returns only the kwargs which are required by a function. Keys, contained in the exception list, are not included. Args: kwargs: All available kwargs. func: The function which should be called. excluded_keys: Keys to exclude from the result. Returns: Subset of kwargs which are accepted by `func`. 
""" from rasa.utils.common import arguments_of excluded_keys = excluded_keys or [] possible_arguments = arguments_of(func) return { k: v for k, v in kwargs.items() if k in possible_arguments and k not in excluded_keys } class bcolors(object): HEADER = "\033[95m" OKBLUE = "\033[94m" OKGREEN = "\033[92m" WARNING = "\033[93m" FAIL = "\033[" \ "" ENDC = "\033[0m" BOLD = "\033[1m" UNDERLINE = "\033[4m" def wrap_with_color(*args: Any, color: Text): return color + " ".join(str(s) for s in args) + bcolors.ENDC def print_color(*args: Any, color: Text): print (wrap_with_color(*args, color=color)) def print_success(*args: Any): print_color(*args, color=bcolors.OKGREEN) def print_info(*args: Any): print_color(*args, color=bcolors.OKBLUE) def print_warning(*args: Any): print_color(*args, color=bcolors.WARNING) def print_error(*args: Any): print_color(*args, color=bcolors.FAIL) def signal_handler(sig, frame): print ("Goodbye 👋") sys.exit(0)
Python
0.000362
@@ -4516,24 +4516,11 @@ 033%5B -%22 %5C%0A %22 +91m %22%0A
9cb10ef23ac0390cce9bad69532436471353181f
Fix invalid code in clean-up on error situation.
flumotion/component/encoders/h264/h264.py
flumotion/component/encoders/h264/h264.py
# -*- Mode: Python -*- # vi:si:et:sw=4:sts=4:ts=4 # Flumotion - a streaming media server # Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L. # Copyright (C) 2010,2011 Flumotion Services, S.A. # All rights reserved. # # This file may be distributed and/or modified under the terms of # the GNU Lesser General Public License version 2.1 as published by # the Free Software Foundation. # This file is distributed without any warranty; without even the implied # warranty of merchantability or fitness for a particular purpose. # See "LICENSE.LGPL" in the source distribution for more information. # # Headers in this file shall remain intact. import gst from flumotion.common import gstreamer, messages, errors from flumotion.component import feedcomponent from flumotion.common.i18n import N_, gettexter T_ = gettexter() class H264Encoder(feedcomponent.EncoderComponent): checkTimestamp = True checkOffset = True profiles = [ 'base', 'cif', 'main', 'svcd', 'd1', 'high', 'dvd', 'hddvd', 'bd', 'bdmain', 'psp', '720p', '1080i', 'ipod', 'avchd', 'iphone', '1seg', 'psp_480_270', 'psp_640_480', 'divx', 'flash_low', 'flash_high'] bitrate_mode = ['cbr', 'cqt', 'vbr'] def get_pipeline_string(self, properties): return "ffmpegcolorspace ! flumch264enc name=encoder" def configure_pipeline(self, pipeline, properties): self.debug('configure_pipeline') element = pipeline.get_by_name('encoder') #FIXME: Default profile should be 'base' but we use 'flash_high' profile = properties.get('profile') if profile is None: m = messages.Warning( T_(N_("Encoding profile not specified. Using 'flash_high' " "as default.\n"))) self.addMessage(m) profile = 'flash_high' self._set_property('profile', profile, element) maxKFDistance = properties.get('max-keyframe-distance') minKFDistance = properties.get('min-keyframe-distance') self._kfDistance = 0 if maxKFDistance == minKFDistance: self._kfDistance = maxKFDistance if properties.get('sync-on-offset', False) and self._kfDistance == 0: m = messages.Error(T_(N_( "The sync-on-offset property can only be set if " "max-keyframe-distance and " "min-keyframe-distance both set with the same value."))) self.addMessage(m) props = ('bitrate', 'bitrate-mode', 'byte-stream', 'max-keyframe-distance', 'min-keyframe-distance', 'sync-on-offset') for p in props: self._set_property(p, properties.get(p), element) # for max-bitrate use in this order: 'max-bitrate', 'bitrate' or None self._set_property('max-bitrate', properties.get('max-bitrate', properties.get('bitrate', None)), element) def _set_property(self, prop, value, element): if value is None: self.debug('No %s set, using default value', prop) return if prop == 'bitrate': self.debug("Setting bitrate to %s", value) element.set_property(prop, value) if prop == 'max-bitrate': self.debug("Setting max bitrate to %s", value) element.set_property(prop, value) if prop == 'bitrate-mode': if value not in self.bitrate_mode: m = messages.Error(T_(N_( "The bitrate mode '%s' does not match any of the encoder's " "available modes"), value), mid='profile') self.addMessage(m) raise errors.ComponentSetupHandledError() self.debug("Setting bitrate mode to %s", value) element.set_property(prop, value) if prop == 'byte-stream': if value is True: self.debug("Setting byte-stream format") element.set_property('es', 1) if prop in ('max-keyframe-distance', 'min-keyframe-distance'): if gstreamer.get_plugin_version('flumch264enc') <= (0, 10, 5, 0): m = messages.Warning( T_(N_("Versions up to and including %s of the '%s' " "cannot set this property.\n"), '0.10.5', 
'flumch264enc')) self.addMessage(m) return self.debug("Setting %s to %s", prop, value) element.set_property(prop, value) if prop == 'profile': if value not in self.profiles: m = messages.Error(T_(N_( "The profile '%s' does not match any of the encoder's " "available profiles"), value), mid='profile') self.addMessage(m) raise errors.ComponentSetupHandledError() self.debug("Setting h264 '%s' profile", value) element.set_property(prop, value) # Adobe recommends using a keyframe distance equivalent to 10 # seconds and the GStreamer component doesn't set it. For live # we want to have at least on keyframe each 3 seconds # See priv#7131 if value in ['flash_high', 'flash_low']: #FIXME: Supposing we have a PAL input with 25fps element.set_property('max-keyframe-distance', 75) if prop == 'sync-on-offset' and value is True: self._synced = False self._tsToOffset = {} sp = element.get_pad("sink") self._sinkID = sp.add_buffer_probe(self._sinkPadProbe) sp = element.get_pad("src") self._srcID = sp.add_buffer_probe(self._srcPadProbe) def _sinkPadProbe(self, pad, buffer): offset = buffer.offset if self._synced: self._tsToOffset[buffer.timestamp] = buffer.offset return True elif offset == gst.BUFFER_OFFSET_NONE: m = messages.Warning(T_(N_( "Can't sync on keyframes, the input source does not write the" " buffer offset."))) self.addMessage(m) pad.remove_buffer_probe(self._sinkID) pad.get_peer().remove_buffer_probe(self._srcID) return True # Offset start at 1 elif not self._synced and (offset - 1) % self._kfDistance == 0: self.info("Syncing encoder with frame:%s" % offset) self._synced = True self._tsToOffset[buffer.timestamp] = buffer.offset return True return False def _srcPadProbe(self, _, buffer): buffer.offset = self._tsToOffset.pop(buffer.timestamp) # HACK: Use OFFSET_END to write the keyframes' offset buffer.offset_end = (buffer.offset - 1) / self._kfDistance return True
Python
0.00001
@@ -6257,12 +6257,29 @@ et_p -eer( +arent().get_pad(%22src%22 ).re
2c6a495351de52fe1de0b36d73f22e777ef3d08c
fix sqlalchemy url with sqlite prefix
wsgi/todopyramid/todopyramid/__init__.py
wsgi/todopyramid/todopyramid/__init__.py
import os from pyramid.config import Configurator from sqlalchemy import engine_from_config from .models import ( DBSession, Base, ) from .views import get_user def get_db_session(request): """return thread-local DB session""" return DBSession def main(global_config, **settings): """ This function returns a Pyramid WSGI application. """ # Persona DNS settings['persona.audiences'] = '%(OPENSHIFT_APP_DNS)s' % os.environ # OpenShift Settings settings['sqlalchemy.url'] = '%(OPENSHIFT_DATA_DIR)s/todopyramid.sqlite' % os.environ engine = engine_from_config(settings, 'sqlalchemy.') DBSession.configure(bind=engine) Base.metadata.bind = engine config = Configurator( settings=settings, root_factory='todopyramid.models.RootFactory', ) includeme(config) # scan modules for config descriptors config.scan() return config.make_wsgi_app() def includeme(config): """we use this concept to include routes and configuration setup in test cases http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/testing.html#creating-integration-tests """ config.add_static_view('static', 'static', cache_max_age=3600) # Misc. views config.add_route('home', '/') config.add_route('about', '/about') # Users config.add_route('account', '/account') # Viewing todo lists config.add_route('todos', '/todos') config.add_route('tags', '/tags') config.add_route('taglist', '/tags/{tag_name}') # AJAX config.add_route('todo', '/todos/{todo_id}') config.add_route('delete.task', '/delete.task/{todo_id}') config.add_route('tags.autocomplete', '/tags.autocomplete') # make DB session a request attribute # http://blog.safaribooksonline.com/2014/01/07/building-pyramid-applications/ config.add_request_method(get_db_session, 'db', reify=True) # Making A User Object Available as a Request Attribute # http://docs.pylonsproject.org/projects/pyramid_cookbook/en/latest/auth/user_object.html config.add_request_method(get_user, 'user', reify=True)
Python
0.001027
@@ -511,24 +511,34 @@ my.url'%5D = ' +sqlite:/// %25(OPENSHIFT_
926df1bc4dee9fc613f0fb31bb8c579943008645
Update plot_label_propagation_digits.py (#22725)
examples/semi_supervised/plot_label_propagation_digits.py
examples/semi_supervised/plot_label_propagation_digits.py
""" =================================================== Label Propagation digits: Demonstrating performance =================================================== This example demonstrates the power of semisupervised learning by training a Label Spreading model to classify handwritten digits with sets of very few labels. The handwritten digit dataset has 1797 total points. The model will be trained using all points, but only 30 will be labeled. Results in the form of a confusion matrix and a series of metrics over each class will be very good. At the end, the top 10 most uncertain predictions will be shown. """ # Authors: Clay Woolam <[email protected]> # License: BSD import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn import datasets from sklearn.semi_supervised import LabelSpreading from sklearn.metrics import confusion_matrix, classification_report digits = datasets.load_digits() rng = np.random.RandomState(2) indices = np.arange(len(digits.data)) rng.shuffle(indices) X = digits.data[indices[:340]] y = digits.target[indices[:340]] images = digits.images[indices[:340]] n_total_samples = len(y) n_labeled_points = 40 indices = np.arange(n_total_samples) unlabeled_set = indices[n_labeled_points:] # ############################################################################# # Shuffle everything around y_train = np.copy(y) y_train[unlabeled_set] = -1 # ############################################################################# # Learn with LabelSpreading lp_model = LabelSpreading(gamma=0.25, max_iter=20) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_set] true_labels = y[unlabeled_set] cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_) print( "Label Spreading model: %d labeled & %d unlabeled points (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples) ) print(classification_report(true_labels, predicted_labels)) print("Confusion matrix") print(cm) # ############################################################################# # Calculate uncertainty values for each transduced distribution pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T) # ############################################################################# # Pick the top 10 most uncertain labels uncertainty_index = np.argsort(pred_entropies)[-10:] # ############################################################################# # Plot f = plt.figure(figsize=(7, 5)) for index, image_index in enumerate(uncertainty_index): image = images[image_index] sub = f.add_subplot(2, 5, index + 1) sub.imshow(image, cmap=plt.cm.gray_r) plt.xticks([]) plt.yticks([]) sub.set_title( "predict: %i\ntrue: %i" % (lp_model.transduction_[image_index], y[image_index]) ) f.suptitle("Learning with small amount of labeled data") plt.show()
Python
0
@@ -675,357 +675,504 @@ SD%0A%0A -import numpy as np%0Aimport matplotlib.pyplot as plt%0A%0Afrom scipy import stats%0A%0Afrom sklearn import datasets%0Afrom sklearn.semi_supervised import LabelSpreading%0A%0Afrom sklearn.metrics import confusion_matrix, classification_report%0A%0Adigits = datasets.load_digits()%0Arng = np.random.RandomState(2)%0Aindices = np.arange(len(digits.data))%0Arng.shuffle(indices)%0A +# %25%25%0A# Data generation%0A# ---------------%0A#%0A# We use the digits dataset. We only use a subset of randomly selected samples.%0Afrom sklearn import datasets%0Aimport numpy as np%0A%0Adigits = datasets.load_digits()%0Arng = np.random.RandomState(2)%0Aindices = np.arange(len(digits.data))%0Arng.shuffle(indices)%0A%0A# %25%25%0A#%0A# We selected 340 samples of which only 40 will be associated with a known label.%0A# Therefore, we store the indices of the 300 other samples for which we are not%0A# supposed to know their labels. %0AX = @@ -1407,85 +1407,10 @@ %0A%0A# -############################################################################# +%25%25 %0A# S @@ -1490,113 +1490,272 @@ %0A%0A# -#############################################################################%0A# Learn with LabelSpreading +%25%25%0A# Semi-supervised learning%0A# ------------------------%0A#%0A# We fit a :class:%60~sklearn.semi_supervised.LabelSpreading%60 and use it to predict%0A# the unknown labels.%0Afrom sklearn.semi_supervised import LabelSpreading%0Afrom sklearn.metrics import classification_report%0A %0Alp_ @@ -1920,88 +1920,8 @@ t%5D%0A%0A -cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)%0A%0A prin @@ -2077,16 +2077,45 @@ les)%0A)%0A%0A +# %25%25%0A# Classification report%0A print(cl @@ -2171,15 +2171,60 @@ ))%0A%0A -print(%22 +# %25%25%0A# Confusion matrix%0Afrom sklearn.metrics import Conf @@ -2232,172 +2232,299 @@ sion - m +M atrix -%22)%0Aprint(cm)%0A%0A# #############################################################################%0A# Calculate uncertainty values for each transduced distribution +Display%0A%0AConfusionMatrixDisplay.from_predictions(%0A true_labels, predicted_labels, labels=lp_model.classes_%0A)%0A%0A# %25%25%0A# Plot the most uncertain predictions%0A# -----------------------------------%0A#%0A# Here, we will pick and show the 10 most uncertain predictions.%0Afrom scipy import stats%0A %0Apre @@ -2605,85 +2605,10 @@ %0A%0A# -############################################################################# +%25%25 %0A# P @@ -2704,92 +2704,50 @@ %0A%0A# -#############################################################################%0A# Plot +%25%25%0A# Plot%0Aimport matplotlib.pyplot as plt%0A %0Af =
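Most of this diff is a mechanical reshuffle of the example into notebook-style # %% cells, with imports moved next to their first use and short section headers added (data generation, semi-supervised learning, classification report, confusion matrix, most uncertain predictions). The main substantive change is that the printed confusion matrix becomes a plotted one; the added fragment, decoded from the diff:

    from sklearn.metrics import ConfusionMatrixDisplay

    ConfusionMatrixDisplay.from_predictions(
        true_labels, predicted_labels, labels=lp_model.classes_
    )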
ebddb1005d3eda45f11eef08d83c271a657443b2
Add functional tests for volume set size
functional/tests/volume/v1/test_volume.py
functional/tests/volume/v1/test_volume.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from functional.common import test class VolumeTests(test.TestCase): """Functional tests for volume. """ NAME = uuid.uuid4().hex OTHER_NAME = uuid.uuid4().hex HEADERS = ['"Display Name"'] FIELDS = ['display_name'] @classmethod def setUpClass(cls): opts = cls.get_show_opts(cls.FIELDS) raw_output = cls.openstack('volume create --size 1 ' + cls.NAME + opts) expected = cls.NAME + '\n' cls.assertOutput(expected, raw_output) @classmethod def tearDownClass(cls): # Rename test raw_output = cls.openstack( 'volume set --name ' + cls.OTHER_NAME + ' ' + cls.NAME) cls.assertOutput('', raw_output) # Delete test raw_output = cls.openstack('volume delete ' + cls.OTHER_NAME) cls.assertOutput('', raw_output) def test_volume_list(self): opts = self.get_list_opts(self.HEADERS) raw_output = self.openstack('volume list' + opts) self.assertIn(self.NAME, raw_output) def test_volume_show(self): opts = self.get_show_opts(self.FIELDS) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual(self.NAME + "\n", raw_output) def test_volume_properties(self): raw_output = self.openstack( 'volume set --property a=b --property c=d ' + self.NAME) self.assertEqual("", raw_output) opts = self.get_show_opts(["properties"]) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("a='b', c='d'\n", raw_output) raw_output = self.openstack('volume unset --property a ' + self.NAME) self.assertEqual("", raw_output) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("c='d'\n", raw_output) def test_volume_set(self): raw_output = self.openstack( 'volume set --description RAMAC ' + self.NAME) opts = self.get_show_opts(["display_description", "display_name"]) raw_output = self.openstack('volume show ' + self.NAME + opts) self.assertEqual("RAMAC\n" + self.NAME + "\n", raw_output)
Python
0.000005
@@ -2724,28 +2724,339 @@ lf.NAME + %22%5Cn%22, raw_output)%0A +%0A def test_volume_set_size(self):%0A raw_output = self.openstack(%0A 'volume set --size 2 ' + self.NAME)%0A opts = self.get_show_opts(%5B%22display_name%22, %22size%22%5D)%0A raw_output = self.openstack('volume show ' + self.NAME + opts)%0A self.assertEqual(self.NAME + %22%5Cn2%5Cn%22, raw_output)%0A
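Decoded, the diff appends a single test after test_volume_set: it resizes the volume with 'volume set --size 2' and then asserts the new size through 'volume show'. Reconstructed from the diff, with indentation normalized since this dump collapses whitespace:

    def test_volume_set_size(self):
        raw_output = self.openstack(
            'volume set --size 2 ' + self.NAME)
        opts = self.get_show_opts(["display_name", "size"])
        raw_output = self.openstack('volume show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\n2\n", raw_output)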
3284f18168ce274516dc51293376571f5dfada18
copy the hasher from FrozenPhoneNumber
phonenumber_field/phonenumber.py
phonenumber_field/phonenumber.py
#-*- coding: utf-8 -*- import phonenumbers from django.core import validators from phonenumbers.phonenumberutil import NumberParseException from django.conf import settings class PhoneNumber(phonenumbers.phonenumber.PhoneNumber): """ A extended version of phonenumbers.phonenumber.PhoneNumber that provides some neat and more pythonic, easy to access methods. This makes using a PhoneNumber instance much easier, especially in templates and such. """ format_map = { 'E164': phonenumbers.PhoneNumberFormat.E164, 'INTERNATIONAL': phonenumbers.PhoneNumberFormat.INTERNATIONAL, 'NATIONAL': phonenumbers.PhoneNumberFormat.NATIONAL, 'RFC3966': phonenumbers.PhoneNumberFormat.RFC3966, } @classmethod def from_string(cls, phone_number, region=None): phone_number_obj = cls() if region is None: region = getattr(settings, 'PHONENUMBER_DEFAULT_REGION', None) phonenumbers.parse(number=phone_number, region=region, keep_raw_input=True, numobj=phone_number_obj) return phone_number_obj def __unicode__(self): format_string = getattr(settings, 'PHONENUMBER_DEFAULT_FORMAT', 'E164') fmt = self.format_map[format_string] if self.is_valid(): return self.format_as(fmt) return self.raw_input def __str__(self): return str(unicode(self)) def original_unicode(self): return super(PhoneNumber, self).__unicode__() def is_valid(self): """ checks whether the number supplied is actually valid """ return phonenumbers.is_valid_number(self) def format_as(self, format): if self.is_valid(): return phonenumbers.format_number(self, format) else: return self.raw_input @property def as_international(self): return self.format_as(phonenumbers.PhoneNumberFormat.INTERNATIONAL) @property def as_e164(self): return self.format_as(phonenumbers.PhoneNumberFormat.E164) @property def as_national(self): return self.format_as(phonenumbers.PhoneNumberFormat.NATIONAL) @property def as_rfc3966(self): return self.format_as(phonenumbers.PhoneNumberFormat.RFC3966) def __len__(self): return len(self.__unicode__()) def __eq__(self, other): if type(other) == PhoneNumber: return self.as_e164 == other.as_e164 else: return super(PhoneNumber, self).__eq__(other) def __hash__(self): return hash(self.__unicode__()) def to_python(value): if value in validators.EMPTY_VALUES: # None or '' phone_number = None elif value and isinstance(value, basestring): try: phone_number = PhoneNumber.from_string(phone_number=value) except NumberParseException, e: # the string provided is not a valid PhoneNumber. phone_number = PhoneNumber(raw_input=value) elif isinstance(value, phonenumbers.phonenumber.PhoneNumber) and \ not isinstance(value, PhoneNumber): phone_number = PhoneNumber(value) elif isinstance(value, PhoneNumber): phone_number = value return phone_number
Python
0
@@ -2584,33 +2584,356 @@ rn hash( +( self. -__unicode__( +country_code,%0A self.national_number,%0A self.extension,%0A self.italian_leading_zero,%0A self.number_of_leading_zeros,%0A self.raw_input,%0A self.country_code_source,%0A self.preferred_domestic_carrier_code ))%0A%0A%0Adef
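Decoded, __hash__ stops hashing the formatted string and instead hashes a tuple of the number's underlying fields, which per the subject mirrors the hasher of FrozenPhoneNumber (a class not shown in this file). Reconstructed from the diff, indentation normalized:

    def __hash__(self):
        return hash((self.country_code,
                     self.national_number,
                     self.extension,
                     self.italian_leading_zero,
                     self.number_of_leading_zeros,
                     self.raw_input,
                     self.country_code_source,
                     self.preferred_domestic_carrier_code))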
fe6da72e141d7cbf431f9b66f23b4b03a8d456a4
fix hanging aggregator
yandextank/aggregator/tank_aggregator.py
yandextank/aggregator/tank_aggregator.py
""" Core module to calculate aggregate data """ import json import logging import queue as q from pkg_resources import resource_string from .aggregator import Aggregator, DataPoller from .chopper import TimeChopper from yandextank.common.interfaces import AggregateResultListener, StatsReader from netort.data_processing import Drain, Chopper, get_nowait_from_queue logger = logging.getLogger(__name__) class LoggingListener(AggregateResultListener): """ Log aggregated results """ def on_aggregated_data(self, data, stats): logger.info("Got aggregated sample:\n%s", json.dumps(data, indent=2)) logger.info("Stats:\n%s", json.dumps(stats, indent=2)) class TankAggregator(object): """ Plugin that manages aggregation and stats collection """ SECTION = 'aggregator' @staticmethod def get_key(): return __file__ def __init__(self, generator): # AbstractPlugin.__init__(self, core, cfg) """ :type generator: GeneratorPlugin """ self.generator = generator self.listeners = [] # [LoggingListener()] self.results = q.Queue() self.stats = q.Queue() self.data_cache = {} self.stat_cache = {} self.reader = None self.stats_reader = None self.drain = None self.stats_drain = None @staticmethod def load_config(): return json.loads(resource_string(__name__, 'config/phout.json').decode('utf8')) def start_test(self, poll_period=1): self.reader = self.generator.get_reader() self.stats_reader = self.generator.get_stats_reader() aggregator_config = self.load_config() verbose_histogram = True if verbose_histogram: logger.info("using verbose histogram") if self.reader and self.stats_reader: pipeline = Aggregator( TimeChopper( DataPoller(source=self.reader, poll_period=poll_period), cache_size=3), aggregator_config, verbose_histogram) self.drain = Drain(pipeline, self.results) self.drain.start() self.stats_drain = Drain( Chopper(DataPoller( source=self.stats_reader, poll_period=poll_period)), self.stats) self.stats_drain.start() else: logger.warning("Generator not found. 
Generator must provide a reader and a stats_reader interface") def _collect_data(self, end=False): """ Collect data, cache it and send to listeners """ data = get_nowait_from_queue(self.results) stats = get_nowait_from_queue(self.stats) logger.debug("Data timestamps: %s" % [d.get('ts') for d in data]) logger.debug("Stats timestamps: %s" % [d.get('ts') for d in stats]) for item in data: ts = item['ts'] if ts in self.stat_cache: # send items data_item = item stat_item = self.stat_cache.pop(ts) self.__notify_listeners(data_item, stat_item) else: self.data_cache[ts] = item for item in stats: ts = item['ts'] if ts in self.data_cache: # send items data_item = self.data_cache.pop(ts) stat_item = item self.__notify_listeners(data_item, stat_item) else: self.stat_cache[ts] = item if end and len(self.data_cache) > 0: logger.info('Timestamps without stats:') for ts, data_item in sorted(self.data_cache.items(), key=lambda i: i[0]): logger.info(ts) self.__notify_listeners(data_item, StatsReader.stats_item(ts, 0, 0)) def is_test_finished(self): self._collect_data() return -1 def end_test(self, retcode): retcode = self.generator.end_test(retcode) if self.reader: logger.debug('Closing gun reader') self.reader.close() if self.stats_reader: logger.debug('Closing stats reader') self.stats_reader.close() if self.drain: logger.debug('Waiting for gun drain to finish') self.drain.wait() logger.debug('Waiting for stats drain to finish') self.stats_drain.wait() logger.debug('Collecting remaining data') self._collect_data(end=True) if self.drain: self.drain.join() self.stats_drain.join() return retcode def add_result_listener(self, listener): self.listeners.append(listener) def __notify_listeners(self, data, stats): """ notify all listeners about aggregate data and stats """ for listener in self.listeners: listener.on_aggregated_data(data, stats)
Python
0.000001
@@ -4326,28 +4326,28 @@ self.drain. -wait +join ()%0A @@ -4428,20 +4428,20 @@ s_drain. -wait +join ()%0A @@ -4485,16 +4485,16 @@ data')%0A + @@ -4526,98 +4526,8 @@ ue)%0A - if self.drain:%0A self.drain.join()%0A self.stats_drain.join()%0A%0A
61610e7024be54fc3ba9a67c03db7ebb0a88270b
Changing the default view graph to be 229
ScoutingWebsite2013/Scouting2011/views.py
ScoutingWebsite2013/Scouting2011/views.py
from django.shortcuts import render from django.db.models import Avg, Sum from django.http.response import HttpResponse from Scouting2011.models import Team, Match import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt from matplotlib.backends.backend_agg import FigureCanvasAgg from matplotlib.font_manager import FontProperties # Create your views here. def __get_team_metrics(team): metrics = team.scoreresult_set.aggregate(Avg('TubesDropped' ), Avg('LowTubesHung' ), Avg('MidTubesHung' ), Avg('HighTubesHung' ), Avg('TubesRecieved' ), Avg('Penelties' ), Avg('MiniBotFinish' ), Avg('ScoredUberTube' ), Sum('DeployedMinibot' ), Sum('WasOffensive' ), Sum('WasScouted' ), Sum('BrokeBadly' ), Sum('Comments' ), ) #Format all of the numbers. If we haven't scouted the team, None will be returned. Turn that into NA for key in metrics: if metrics[key] == None: metrics[key] = "NA" elif "__avg" in key: metrics[key] = "{:10.2f}".format(metrics[key]) return metrics def index(request): return render(request, 'Scouting2011/index.html') def all_teams(request): all_teams = Team.objects.all() teams_with_avg = [] for team in all_teams: metrics = __get_team_metrics(team) team_with_avg = {"id": team.id, "teamNumber": team.teamNumber, "matches_scouted": team.scoreresult_set.count(), "metrics": metrics, } teams_with_avg.append(team_with_avg) context = {"teams": teams_with_avg} return render(request, 'Scouting2011/all_teams.html', context) def view_team(request, team_id): this_team = Team.objects.get(id=team_id) metrics = __get_team_metrics(this_team) match_list = [] for sr in this_team.scoreresult_set.all(): match_list.append(sr.match) print metrics context = {"id": this_team.id, "teamNumber": this_team.teamNumber, "metrics": metrics, "match_list": match_list, } return render(request, 'Scouting2011/single_team.html', context) def all_matches(request): all_matches = Match.objects.all() context = {"matches": all_matches} return render(request, 'Scouting2011/all_matches.html', context) def view_match(request, match_id): this_match = Match.objects.get(id=match_id) results = this_match.scoreresult_set.all() context = {"id": this_match.id, "matchNumber": this_match.matchNumber, "results": results, } return render(request, 'Scouting2011/single_match.html', context) def view_graph(request, team_ids=[], fields=[]): team_ids = [3, 4] fields = "auton_score,high_goals" team_numbers = [] for team_id in team_ids: team = Team.objects.get(id=team_id) team_numbers.append(team.teamNumber) context = {"team_ids": ",".join(str(x) for x in team_ids), "team_numbers": team_numbers, "field_list": fields, } return render(request, 'Scouting2011/view_graph.html', context) def create_metrics_plot(request, team_ids, fields): team_ids = [str(x) for x in team_ids.split(",")] fields = [str(x) for x in fields.split(",")] f = plt.figure(figsize=(6,6)) legend_handles = [] for team_id in team_ids: team = Team.objects.get(id=team_id) for field in fields: metric = [] for result in team.scoreresult_set.all(): metric.append(getattr(result, field)) print field hand, = plt.plot(metric, label="Team %s, %s" % (team.teamNumber, field)) legend_handles.append(hand) fontP = FontProperties() fontP.set_size('small') plt.legend(handles=legend_handles, prop=fontP) plt.xlabel("Match") matplotlib.pyplot.close(f) canvas = FigureCanvasAgg(f) response = HttpResponse(content_type='image/png') canvas.print_png(response) return response
Python
0.999998
@@ -3473,12 +3473,9 @@ = %5B -3, 4 +2 %5D%0A @@ -3490,30 +3490,47 @@ = %22 -auton_score,high_goals +HighTubesHung,ScoredUberTube,BrokeBadly %22%0A