commit stringlengths 40-40 | subject stringlengths 1-3.25k | old_file stringlengths 4-311 | new_file stringlengths 4-311 | old_contents stringlengths 0-26.3k | lang stringclasses 3 values | proba float64 0-1 | diff stringlengths 0-7.82k
---|---|---|---|---|---|---|---|
56150b1aef230cd7a7fb4cd8f44dd8884934d716 | set value for field expected_date in purchase.order | addons/purchase/report/purchase_report.py | addons/purchase/report/purchase_report.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Please note that these reports are not multi-currency !!!
#
from osv import fields,osv
import tools
class purchase_report(osv.osv):
_name = "purchase.report"
_description = "Purchases Orders"
_auto = False
_columns = {
'date': fields.date('Order Date', readonly=True, help="Date on which this document has been created"),
'name': fields.char('Year',size=64,required=False, readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'state': fields.selection([('draft', 'Request for Quotation'),
('wait', 'Waiting'),
('confirmed', 'Waiting Supplier Ack'),
('approved', 'Approved'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')],'Order State', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
'location_id': fields.many2one('stock.location', 'Destination', readonly=True),
'partner_id':fields.many2one('res.partner', 'Supplier', readonly=True),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'date_approve':fields.date('Date Approved', readonly=True),
'expected_date':fields.date('Expected Date', readonly=True),
'validator' : fields.many2one('res.users', 'Validated By', readonly=True),
'product_uom' : fields.many2one('product.uom', 'Reference Unit of Measure', required=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Responsible', readonly=True),
'delay':fields.float('Days to Validate', digits=(16,2), readonly=True),
'delay_pass':fields.float('Days to Deliver', digits=(16,2), readonly=True),
'quantity': fields.float('Quantity', readonly=True),
'price_total': fields.float('Total Price', readonly=True),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'negociation': fields.float('Purchase-Standard Price', readonly=True, group_operator="avg"),
'price_standard': fields.float('Products Value', readonly=True, group_operator="sum"),
'nbr': fields.integer('# of Lines', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
'category_id': fields.many2one('product.category', 'Category', readonly=True)
}
_order = 'name desc,price_total desc'
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
select
min(l.id) as id,
s.date_order as date,
to_char(s.date_order, 'YYYY') as name,
to_char(s.date_order, 'MM') as month,
to_char(s.date_order, 'YYYY-MM-DD') as day,
s.state,
s.date_approve,
date_trunc('day',s.minimum_planned_date) as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
s.warehouse_id as warehouse_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
(l.price_unit*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit*l.product_qty) / NULLIF(t.standard_price*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(t.standard_price*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*l.price_unit)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order s
left join purchase_order_line l on (s.id=l.order_id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
where l.product_id is not null
group by
s.company_id,
s.create_uid,
s.partner_id,
l.product_qty,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
date_trunc('day',s.minimum_planned_date),
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
to_char(s.date_order, 'YYYY'),
to_char(s.date_order, 'MM'),
to_char(s.date_order, 'YYYY-MM-DD'),
s.state,
s.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor
)
""")
purchase_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0 | @@ -4539,33 +4539,16 @@
-date_trunc('day',
s.minimu
@@ -4561,17 +4561,16 @@
ned_date
-)
as expe
@@ -6715,25 +6715,8 @@
-date_trunc('day',
s.mi
@@ -6733,17 +6733,16 @@
ned_date
-)
,%0A
|
708f916be9060d6df17f53ee0e4acc59fd742c9c | Add kwargs to websocket message constructor (#1328) | opsdroid/connector/websocket/__init__.py | opsdroid/connector/websocket/__init__.py | """A connector which allows websocket connections."""
import logging
import json
import uuid
from datetime import datetime
import aiohttp
import aiohttp.web
from aiohttp import WSCloseCode
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
_LOGGER = logging.getLogger(__name__)
HEADERS = {"Access-Control-Allow-Origin": "*"}
CONFIG_SCHEMA = {"bot-name": str, "max-connections": int, "connection-timeout": int}
class ConnectorWebsocket(Connector):
"""A connector which allows websocket connections."""
def __init__(self, config, opsdroid=None):
"""Create the connector."""
super().__init__(config, opsdroid=opsdroid)
_LOGGER.debug(_("Starting Websocket connector."))
self.name = "websocket"
self.max_connections = self.config.get("max-connections", 10)
self.connection_timeout = self.config.get("connection-timeout", 60)
self.accepting_connections = True
self.active_connections = {}
self.available_connections = []
self.bot_name = self.config.get("bot-name", "opsdroid")
async def connect(self):
"""Connect to the chat service."""
self.accepting_connections = True
self.opsdroid.web_server.web_app.router.add_get(
"/connector/websocket/{socket}", self.websocket_handler
)
self.opsdroid.web_server.web_app.router.add_post(
"/connector/websocket", self.new_websocket_handler
)
async def disconnect(self):
"""Disconnect from current sessions."""
self.accepting_connections = False
connections_to_close = self.active_connections.copy()
for connection in connections_to_close:
await connections_to_close[connection].close(
code=WSCloseCode.GOING_AWAY, message="Server shutdown"
)
async def new_websocket_handler(self, request):
"""Handle for aiohttp creating websocket connections."""
if (
len(self.active_connections) + len(self.available_connections)
< self.max_connections
and self.accepting_connections
):
socket = {"id": str(uuid.uuid1()), "date": datetime.now()}
self.available_connections.append(socket)
return aiohttp.web.Response(
text=json.dumps({"socket": socket["id"]}), headers=HEADERS, status=200
)
return aiohttp.web.Response(
text=json.dumps("No connections available"), headers=HEADERS, status=429
)
async def websocket_handler(self, request):
"""Handle for aiohttp handling websocket connections."""
socket = request.match_info.get("socket")
available = [
item for item in self.available_connections if item["id"] == socket
]
if len(available) != 1:
return aiohttp.web.Response(
text=json.dumps("Please request a socket first"),
headers=HEADERS,
status=400,
)
if (
datetime.now() - available[0]["date"]
).total_seconds() > self.connection_timeout:
self.available_connections.remove(available[0])
return aiohttp.web.Response(
text=json.dumps("Socket request timed out"), headers=HEADERS, status=408
)
self.available_connections.remove(available[0])
_LOGGER.debug(_("User connected to %s."), socket)
websocket = aiohttp.web.WebSocketResponse()
await websocket.prepare(request)
self.active_connections[socket] = websocket
async for msg in websocket:
if msg.type == aiohttp.WSMsgType.TEXT:
message = Message(msg.data, None, None, self)
await self.opsdroid.parse(message)
elif msg.type == aiohttp.WSMsgType.ERROR:
_LOGGER.error(
_("Websocket connection closed with exception %s."),
websocket.exception(),
)
_LOGGER.info(_("websocket connection closed"))
self.active_connections.pop(socket, None)
return websocket
async def listen(self):
"""Listen for and parse new messages.
Listening is handled by the aiohttp web server so
we don't need to do anything here.
"""
@register_event(Message)
async def send_message(self, message):
"""Respond with a message."""
try:
if message.target is None:
message.target = next(iter(self.active_connections))
_LOGGER.debug(
_("Responding with: '%s' in target %s"), message.text, message.target
)
await self.active_connections[message.target].send_str(message.text)
except KeyError:
_LOGGER.error(_("No active socket for target %s"), message.target)
| Python | 0 | @@ -3747,16 +3747,21 @@
Message(
+text=
msg.data
@@ -3766,20 +3766,42 @@
ta,
+user=
None,
-None,
+target=None, connector=
self
|
4b7e6d7df8a447873bc57adfedfb6013b915190c | Fix Node.namespace_uri for py3 | cio/node.py | cio/node.py | # coding=utf-8
from __future__ import unicode_literals
from .environment import env
from .utils.formatters import ContentFormatter
from .utils.uri import URI
import six
empty = object()
class Node(object):
_formatter = ContentFormatter()
def __init__(self, uri, content=None, **meta):
self.env = env.state
self._uri = [uri, URI(uri)]
self._content = [content]
self.meta = meta
def __repr__(self):
return '<Node: %s>' % self.uri
def __bytes__(self):
content = self.render()
if isinstance(content, six.text_type):
content = content.encode('utf-8')
return content or b''
def __unicode__(self):
return self.render() or ''
__str__ = __bytes__ if six.PY2 else __unicode__
def render(self, **context):
if self.content is not None:
if context:
return self._formatter.format(self.content, **context)
else:
return self.content
def get_uri(self):
return self._uri[-1]
def set_uri(self, uri):
if uri != self.get_uri():
self._uri.append(URI(uri))
uri = property(get_uri, set_uri)
def get_content(self):
return self._content[-1]
def set_content(self, content):
if content != self.get_content():
self._content.append(content)
content = property(get_content, set_content)
@property
def initial(self):
return self._content[0]
@property
def initial_uri(self):
return self._uri[0]
@property
def namespace_uri(self):
"""
Finds and returns first applied URI of this node that has a namespace.
:return str: uri
"""
try:
return iter(
filter(lambda uri: URI(uri).namespace, self._uri)
).next()
except StopIteration:
return None
def for_json(self):
return {
'uri': six.text_type(self.uri),
'content': self.content,
'meta': self.meta if self.meta is not None else {}
}
| Python | 0.000765 | @@ -1767,20 +1767,20 @@
return
-iter
+next
(%0A
@@ -1789,16 +1789,21 @@
+iter(
filter(l
@@ -1843,16 +1843,17 @@
lf._uri)
+)
%0A
@@ -1862,15 +1862,8 @@
)
-.next()
%0A
|
3a42b4458f85d8f2640c34fce79c9a99a79f5323 | Revert "add second db connection to coastdat" | calc_renpass_gis/scenario_reader/db.py | calc_renpass_gis/scenario_reader/db.py | # -*- coding: utf-8 -*-
from sqlalchemy import (Column, Float, ForeignKey, Integer, MetaData, String,
Table, join, create_engine, ForeignKeyConstraint,
Boolean, DateTime, Sequence)
from sqlalchemy.orm import sessionmaker, relationship, configure_mappers
# from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.automap import automap_base
# from geoalchemy2 import Geometry, shape
import configparser as cp
# from sqlalchemy.sql import func
# from sqlalchemy.dialects import postgresql
import os.path as path
# read configuration file
FILENAME = 'config.ini'
FILE = path.join(path.expanduser("~"), '.open_eGo', FILENAME)
cfg = cp.ConfigParser()
cfg.read(FILE)
# establish db connection
section = 'Connection'
conn = create_engine(
"postgresql+psycopg2://{user}:{password}@{host}:{port}/{db}".format(
user=cfg.get(section, 'username'),
password=cfg.get(section, 'password'),
host=cfg.get(section, 'host'),
port=cfg.get(section, 'port'),
db=cfg.get(section, 'db')))
print("Connected to database.")
# establish second db connection to
# change of init file in .open_ego and Server connetion via ssh required
section2 = 'Coastdat'
conn2 = create_engine(
"postgresql+psycopg2://{user}:{password}@{host}:{port}/{db}".format(
user=cfg.get(section2, 'username'),
password=cfg.get(section2, 'password'),
host=cfg.get(section2, 'host'),
port=cfg.get(section2, 'port'),
db=cfg.get(section2, 'db')))
print("Connected to database 2.")
# map schema
session = sessionmaker(bind=conn)()
meta = MetaData()
meta.bind = conn
meta.reflect(bind=conn, schema='calc_renpass_gis',
only=['renpass_gis_scenario',
'renpass_gis_linear_transformer',
'renpass_gis_source',
'renpass_gis_sink',
'renpass_gis_storage'])
# map to classes
Base = automap_base(metadata=meta)
Base.prepare()
Scenario, LinearTransformer, Source, Sink, Storage = \
Base.classes.renpass_gis_scenario,\
Base.classes.renpass_gis_linear_transformer,\
Base.classes.renpass_gis_source,\
Base.classes.renpass_gis_sink,\
Base.classes.renpass_gis_storage
# map schema of coastdat-2
session2 = sessionmaker(bind=conn)()
meta2 = MetaData()
meta2.bind = conn2
meta2.reflect(bind=conn2, schema='coastdat',
only=['cosmoclmgrid',
'datatype',
'located',
'projection',
'scheduled',
'spatial',
'timeseries',
'typified',
'year'])
# map to classes of coastdat weather data
Coastdat = automap_base(metadata=meta2)
Coastdat.prepare()
| Python | 0 | @@ -1110,484 +1110,8 @@
%22)%0A%0A
-%0A# establish second db connection to%0A# change of init file in .open_ego and Server connetion via ssh required%0Asection2 = 'Coastdat'%0Aconn2 = create_engine(%0A %22postgresql+psycopg2://%7Buser%7D:%7Bpassword%7D@%7Bhost%7D:%7Bport%7D/%7Bdb%7D%22.format(%0A user=cfg.get(section2, 'username'),%0A password=cfg.get(section2, 'password'),%0A host=cfg.get(section2, 'host'),%0A port=cfg.get(section2, 'port'),%0A db=cfg.get(section2, 'db')))%0A%0Aprint(%22Connected to database 2.%22)%0A%0A%0A%0A
# ma
@@ -1790,558 +1790,4 @@
age%0A
-%0A# map schema of coastdat-2%0Asession2 = sessionmaker(bind=conn)()%0A%0Ameta2 = MetaData()%0Ameta2.bind = conn2%0Ameta2.reflect(bind=conn2, schema='coastdat',%0A only=%5B'cosmoclmgrid',%0A 'datatype',%0A 'located',%0A 'projection',%0A 'scheduled',%0A 'spatial',%0A 'timeseries', %0A 'typified',%0A 'year'%5D)%0A%0A%0A# map to classes of coastdat weather data%0ACoastdat = automap_base(metadata=meta2)%0ACoastdat.prepare()%0A
|
14a7d5305a3e5dfc73834cb6164def4c0706e740 | Fix : cmdline GET output | cli/client.py | cli/client.py | # -*- coding: utf-8 -*-
# Copyright (c) 2012 theo crevon
#
# See the file LICENSE for copying permission.
from __future__ import absolute_import
import zmq
from elevator.constants import *
from .errors import *
from .message import Request, ResponseHeader, Response
class Client(object):
def __init__(self, *args, **kwargs):
self.protocol = kwargs.pop('protocol', 'tcp')
self.endpoint = kwargs.pop('endpoint', '127.0.0.1:4141')
self.host = "%s://%s" % (self.protocol, self.endpoint)
self.context = None
self.socket = None
self.timeout = kwargs.pop('timeout', 10000)
self.db_uid = None
self.connect()
def setup_socket(self):
self.context = zmq.Context()
self.socket = self.context.socket(zmq.XREQ)
self.socket.setsockopt(zmq.LINGER, 0)
self.socket.setsockopt(zmq.RCVTIMEO, self.timeout)
self.socket.connect(self.host)
def teardown_socket(self):
self.socket.close()
self.context.term()
def connect(self, db_name=None, *args, **kwargs):
self.setup_socket()
db_name = 'default' if db_name is None else db_name
self.db_uid = self.send_cmd(None, 'DBCONNECT', [db_name], *args, **kwargs)[0]
self.db_name = db_name
return
def send_cmd(self, db_uid, command, arguments, *args, **kwargs):
self.socket.send_multipart([Request(db_uid=db_uid,
command=command,
args=arguments,
meta={})],)
try:
raw_header, raw_response = self.socket.recv_multipart()
header = ResponseHeader(raw_header)
response = Response(raw_response)
if header.status == FAILURE_STATUS:
return fail_with(ELEVATOR_ERROR[header.err_code], header.err_msg)
except zmq.core.error.ZMQError:
# Restore original timeout and raise
return fail_with("TimeoutError", "Server did not respond in time")
return response.datas
| Python | 0.000002 | @@ -1298,16 +1298,155 @@
return%0A%0A
+ def _format_response(self, req_cmd, res_datas):%0A if req_cmd == %22GET%22:%0A return res_datas%5B0%5D%0A return res_datas%0A%0A
def
@@ -2228,24 +2228,55 @@
return
+ self._format_response(command,
response.da
@@ -2274,13 +2274,14 @@
sponse.datas
+)
%0A
|
900b37fee45db789b413d55b497d87992c3dab00 | Remove Welcome! Flash | bakery/gitauth/views.py | bakery/gitauth/views.py | # coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
from flask import (Blueprint, request, flash, g, session, redirect,
url_for, current_app)
from .models import User
from ..extensions import db, github
from flask.ext.babel import gettext as _
gitauth = Blueprint('gitauth', __name__, url_prefix='/auth')
@gitauth.after_request
def after_request(response):
return response
@gitauth.route('/me')
def me():
"""
Bypass Github authentication and login as 1st user in the database.
Usage:
Visit /auth/me to log in as the 1st user in the database, to
work offline as that user. To allow a github username to work
in this single user mode, change GITAUTH_LOGIN_LIST config property.
You need to login using GitHub at least once before this will work.
This only works if server is in debug mode.
"""
if current_app.debug:
# pylint:disable-msg=E1101
user = User.get_or_init('offline')
if user.id and user.login in current_app.config['GITAUTH_LOGIN_LIST']:
session['user_id'] = user.id
g.user = user
flash(_('Welcome to single user mode!'))
return redirect(url_for('frontend.splash'))
@gitauth.route('/login')
def login():
if session.get('user_id', None) is None:
redirect_uri = url_for('.authorized',
next=request.args.get('next') or request.referrer or None,
_external=True)
params = {'redirect_uri': redirect_uri, 'scope': 'user:email,public_repo'}
return redirect(github.get_authorize_url(**params))
else:
flash(_('Already logged in'))
return redirect(url_for('frontend.splash'))
@gitauth.route('/callback')
def authorized(next = None):
next_url = request.args.get('next') or url_for('frontend.splash')
if not 'code' in request.args:
flash(_('You did not authorize the request'))
return redirect(next_url)
redirect_uri = url_for('.authorized', _external=True)
data = dict(code=request.args['code'], redirect_uri=redirect_uri)
auth = github.get_auth_session(data=data)
token = auth.access_token
me = auth.get('user').json()
user = User.get_or_init(me['login'])
# if user.id is None:
# new record isn't saved yet
# flash(_('Welcome to Bakery.'))
# update user data
user.name = me.get('name', me.get('login'))
user.github_access_token = token
user.avatar = me['gravatar_id']
user.email = me['email']
# save to database
db.session.add(user)
db.session.commit()
session['user_id'] = user.id
g.user = user
flash(_('Welcome!'))
return redirect(next_url)
@gitauth.route('/logout')
def logout():
session.pop('user_id', None)
return redirect(url_for('frontend.splash'))
| Python | 0.000008 | @@ -3275,34 +3275,8 @@
ser%0A
- flash(_('Welcome!'))%0A%0A
|
49152781ecbfb4f51707e6e54641301038eba80f | set varchar length | king/DataPoint.py | king/DataPoint.py | from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, PickleType, Boolean, String, DateTime
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
engine = create_engine('mysql+pymysql://ucb_268_measure:[email protected]:3306/mydb', echo=False)
Base = declarative_base(bind=engine)
Session = sessionmaker(bind=engine)
class DataPoint(Base):
__tablename__ = 'data'
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime)
name1 = Column(String)
name2 = Column(String)
target1 = Column(PickleType)
target2 = Column(PickleType)
start = Column(DateTime)
end = Column(DateTime)
pings = Column(PickleType)
address = Column(PickleType)
test_point = Column(String)
success = Column(Boolean)
def __init__(self, name1, name2, target1, target2, start, end,
pings, address, test_point, success):
self.timestamp = datetime.now()
self.name1 = name1
self.name2 = name2
self.target1 = target1
self.target2 = target2
self.start = start
self.end = end
self.pings = pings
self.address = address
self.test_point = test_point
self.success = success
Base.metadata.create_all()
| Python | 0.000068 | @@ -599,24 +599,36 @@
olumn(String
+(length=200)
)%0A name2
@@ -638,24 +638,36 @@
olumn(String
+(length=200)
)%0A target
@@ -872,16 +872,28 @@
n(String
+(length=200)
)%0A su
|
4795eb8f7b59c915a15afef6ac884e46e2682564 | Revert "suppress error" | daemon16.py | daemon16.py | #!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon16.py reports various UPS variables.
# uses moving averages
import syslog, traceback
import os, sys, time, math, subprocess
from libdaemon import Daemon
DEBUG = False
IS_SYSTEMD = os.path.isfile('/bin/journalctl')
class MyDaemon(Daemon):
def run(self):
reportTime = 60 # time [s] between reports
cycles = 3 # number of cycles to aggregate
samplesperCycle = 5 # total number of samples in each cycle
samples = samplesperCycle * cycles # total number of samples averaged
sampleTime = reportTime/samplesperCycle # time [s] between samples
cycleTime = samples * sampleTime # time [s] per cycle
data = [] # array for holding sampledata
while True:
try:
startTime = time.time()
result = do_work().split(',')
if DEBUG:print "result:",result
data.append(map(float, result))
if (len(data) > samples):data.pop(0)
# report sample average
if (startTime % reportTime < sampleTime):
if DEBUG:print "data:",data
somma = map(sum,zip(*data))
averages = [format(s / len(data), '.3f') for s in somma]
if DEBUG:print "averages:",averages
do_report(averages)
waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
if (waitTime > 0):
if DEBUG:print "Waiting {0} s".format(waitTime)
time.sleep(waitTime)
except Exception as e:
if DEBUG:
print "Unexpected error:"
print e.message
syslog.syslog(syslog.LOG_ALERT,e.__doc__)
syslog_trace(traceback.format_exc())
raise
def syslog_trace(trace):
# Log a python stack trace to syslog
log_lines = trace.split('\n')
for line in log_lines:
if line:
syslog.syslog(syslog.LOG_ALERT,line)
def do_work():
# 5 datapoints gathered here
upsc = subprocess.check_output(["upsc", "ups@localhost", "2>&1"]).splitlines()
for element in range(0, len(upsc) - 1):
var = upsc[element].split(': ')
if (var[0] == 'input.voltage'):
ups0 = float(var[1])
if (var[0] == 'battery.voltage'):
ups1 = float(var[1])
if (var[0] == 'battery.charge'):
ups2 = float(var[1])
if (var[0] == 'ups.load'):
ups3 = float(var[1])
if (var[0] == 'battery.runtime'):
ups4 = float(var[1])
return '{0}, {1}, {2}, {3} ,{4}'.format(ups0, ups1, ups2, ups3, ups4)
def do_report(result):
# Get the time and date in human-readable form and UN*X-epoch...
outDate = time.strftime('%Y-%m-%dT%H:%M:%S, %s')
result = ', '.join(map(str, result))
flock = '/tmp/raspdiagd/16.lock'
lock(flock)
with open('/tmp/raspdiagd/16-aux-ups.csv', 'a') as f:
f.write('{0}, {1}\n'.format(outDate, result) )
unlock(flock)
def lock(fname):
open(fname, 'a').close()
def unlock(fname):
if os.path.isfile(fname):
os.remove(fname)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/raspdiagd/16.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: {0!s} start|stop|restart|foreground".format(sys.argv[0])
sys.exit(2)
| Python | 0 | @@ -2357,17 +2357,16 @@
%5B%22upsc%22,
-
%22ups@loc
@@ -2376,16 +2376,8 @@
ost%22
-, %222%3E&1%22
%5D).s
|
b40ae3422ed5a5da8387d9c21bd25ae7ccf25ff6 | use in keyword | duration.py | duration.py | import re
_millisecond_size = 1.0
_second_size = 100.0 * _millisecond_size
_minute_size = 60.0 * _second_size
_hour_size = 60.0 * _minute_size
_day_size = 24.0 * _hour_size
_week_size = 7.0 * _day_size
class Duration():
def __init__(self, value):
self._milliseconds = int(value)
def nanoseconds(self):
return int(self._milliseconds * 1000000)
def microseconds(self):
return int(self._milliseconds * 1000)
def millisecond(self):
return self._milliseconds
def seconds(self):
return int(self._milliseconds / _second_size)
def minutes(self):
return int(self._milliseconds / _minute_size)
def hours(self):
return int(self._milliseconds / _hour_size)
def days(self):
return int(self._milliseconds / _day_size)
def weeks(self):
return int(self._milliseconds / _week_size)
def __str__(self):
result_str = ""
milliseconds = abs(self._milliseconds)
sign = "-" if self._milliseconds < 0 else ""
if not milliseconds:
return "0"
hours = int(milliseconds / _hour_size)
if hours:
milliseconds -= _hour_size * hours
result_str += "{}h".format(hours)
minutes = int(milliseconds / _minute_size)
if minutes:
milliseconds -= _minute_size * minutes
result_str += "{}m".format(minutes)
seconds = int(milliseconds / _second_size)
if seconds:
milliseconds -= _second_size * seconds
result_str += "{}s".format(seconds)
if milliseconds:
result_str += "{}ms".format(milliseconds)
return "{}{}".format(sign, result_str)
def __repr__(self):
return "Duration({})".format(self.__str__())
def __int__(self):
return int(self._milliseconds)
def __float__(self):
return float(self._milliseconds)
def __long__(self):
return long(self._milliseconds)
def __add__(self, other):
if not instanceof(other, Duration):
raise Exception("only Durations can be added to durations")
return Duration(int(self) + int(other))
def __sub__(self, other):
if not instanceof(other, Duration):
raise Exception("only Durations can be subtracted from Durations")
return Duration(int(self) - int(other))
def __mul__(self, other):
if not instanceof(other, (int, long, float)):
raise Exception("Durations can only be multiplied by scalar values")
return Duration(int(self) * int(other))
def __div__(self, other):
if instanceof(other, (int, long, float)):
return Duration(int(self) / int(other))
if instanceof(other, Duration):
return int(self) / int(other)
def __neg__(self):
return Duration(-int(self))
def __pos__(self):
return Duration(int(self))
def __abs__(self):
return Duration(abs(int(self)))
def parse(duration):
units = {
"ms" : _millisecond_size,
"s" : _second_size,
"m" : _minute_size,
"h" : _hour_size,
"d" : _day_size,
"w" : _week_size
}
if duration == "0" or duration == "+0" or duration == "-0":
return Duration(0)
pattern = re.compile('([\-\+\d\.]+)([a-z]+)')
total = 0
sign = -1 if duration[0] == '-' else 1
matches = pattern.findall(duration)
if not len(matches):
raise Exception("invalid duration")
for (value, unit) in matches:
try:
total += int(value) * units[unit]
except:
raise Exception("invalid duration")
return Duration(sign * total)
millisecond = Duration(_millisecond_size)
second = Duration(_second_size)
minute = Duration(_minute_size)
hour = Duration(_hour_size)
day = Duration(_day_size)
week = Duration(_week_size)
| Python | 0.000002 | @@ -3291,54 +3291,28 @@
ion
-== %220%22 or duration == %22+0%22 or duration ==
+in (%220%22, %22+0%22,
%22-0%22
+)
:%0A
|
93db3543a576ccde905fc77d7c3ad825f6a100a1 | change threshold | misc_scripts/compare_bounds.py | misc_scripts/compare_bounds.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re
from fontTools.ttLib import TTFont
from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen
class ConcordanceInfo(object):
def __init__(self):
self.glyphs = 0
self.concordant_glyphs = 0
self.maxdiff = 0
self.maxdiff_gname = None
def update(self, diff, gname):
self.glyphs += 1
if diff <= 1:
self.concordant_glyphs += 1
elif diff > self.maxdiff:
self.maxdiff = round(diff, 2)
self.maxdiff_gname = gname
def calc_bounds(font, gname, penclass):
gs = font.getGlyphSet()
g = gs[gname]
pen = penclass(gs)
g.draw(pen)
return [round(v, 2) for v in pen.bounds] if pen.bounds is not None else None
def bounds_differ(bounds1, bounds2):
for v1, v2 in zip(bounds1, bounds2):
if abs(v1 - v2) > 1:
return True
return False
def compare_bounds():
font1_path = sys.argv[1]
font2_path = sys.argv[2]
font1 = TTFont(font1_path, fontNumber=0)
font2 = TTFont(font2_path, fontNumber=0)
for gname in font1.getGlyphOrder():
bounds1 = calc_bounds(font1, gname, BoundsPen)
bounds2 = calc_bounds(font2, gname, BoundsPen)
if bounds1 is None or bounds2 is None:
if bounds1 is not None or bounds2 is not None:
print "[{}] {} {}".format(gname, bounds1, bounds2)
elif bounds_differ(bounds1, bounds2):
print "[{}] {} {}".format(gname, bounds1, bounds2)
def main():
compare_bounds()
if __name__ == "__main__":
main()
| Python | 0.000001 | @@ -883,16 +883,17 @@
- v2) %3E
+=
1:%0A
|
df88bc165e0a505b07c84aea4a29bf3c048895ac | replace OSError with FileNotFoundError when appropriate | dwi/hdf5.py | dwi/hdf5.py | """Support for HDF5 files."""
from collections import OrderedDict
import numpy as np
import h5py
import dwi.util
DEFAULT_DSETNAME = 'default'
DEFAULT_DSETPARAMS = dict(
compression='gzip', # Smaller, compatible.
# compression='lzf', # Faster.
shuffle=True, # Rearrange bytes for better compression.
fletcher32=True, # Flether32 checksum.
track_times=False, # Dataset creation timestamps.
)
def iterable(x):
"""Tell whether an object is iterable or not."""
try:
iter(x)
except TypeError:
return False
else:
return True
def write_hdf5(filename, array, attrs, fillvalue=None,
dsetname=DEFAULT_DSETNAME):
"""Write an array with attributes into a newly created, compressed HDF5
file.
"""
f = h5py.File(filename, 'w')
dset = f.create_dataset(dsetname, data=array, fillvalue=fillvalue,
**DEFAULT_DSETPARAMS)
write_attrs(dset, attrs)
f.close()
def read_hdf5(filename, ondisk=False, dsetname=DEFAULT_DSETNAME):
"""Read an array with attributes from an HDF5 file.
With parameter "ondisk" True it will not be read into memory."""
try:
f = h5py.File(filename, 'r')
except IOError as e:
if e.filename is None:
e.filename = filename
raise
if dsetname not in f:
# No dataset of given name, try the one there is.
try:
dsetname, = f.keys()
except ValueError:
raise ValueError('Ambiguous content: {}'.format(filename))
dset = f[dsetname]
if ondisk:
array = dset
else:
array = np.array(dset)
attrs = read_attrs(dset)
if not ondisk:
f.close()
return array, attrs
def create_hdf5(filename, shape, dtype, fillvalue=None,
dsetname=DEFAULT_DSETNAME):
"""Create a HDF5 file and return the dataset for manipulation.
Attributes and the file object can be accessed by dset.attrs and dset.file.
"""
f = h5py.File(filename, 'w')
dset = f.create_dataset(dsetname, shape, dtype=dtype, fillvalue=fillvalue,
**DEFAULT_DSETPARAMS)
return dset
def write_attrs(dset, attrs):
"""Update dataset attributes from dictionary. This is a wrapper for
conversion needs, like string encoding and None to nan.
"""
for k, v in attrs.items():
dset.attrs[k] = convert_value_write(v)
def read_attrs(dset):
"""Read attributes from dataset. This is a wrapper for conversion needs."""
return OrderedDict((k, convert_value_read(v)) for k, v in
dset.attrs.items())
def convert_value_write(v):
"""HDF5 doesn't understand None objects, so replace any with nan values."""
def convert_item(x):
"""Convert sequence item."""
if x is None:
return np.nan
if isinstance(x, str):
return x.encode()
return x
if iterable(v) and not isinstance(v, str):
v = [convert_item(x) for x in v]
return v
def convert_value_read(value):
"""Convert attribute value from bytes to string."""
if isinstance(value, bytes):
return value.decode()
elif not np.isscalar(value) and np.issubsctype(value, np.bytes_):
return value.astype(np.str_)
return value
| Python | 0.000039 | @@ -97,23 +97,34 @@
py%0A%0A
-import dwi.util
+from dwi.types import Path
%0A%0ADE
@@ -1228,16 +1228,88 @@
e, 'r')%0A
+ # On missing file, h5py raises OSError without errno (or filename).%0A
exce
@@ -1315,10 +1315,10 @@
ept
-I
O
+S
Erro
@@ -1340,32 +1340,198 @@
if
-e.filename is None:%0A
+not Path(filename).exists():%0A raise FileNotFoundError(2, 'No such file or directory', filename)%0A e.filename = e.filename or filename%0A raise%0A except IOError as e:%0A
@@ -1546,16 +1546,30 @@
lename =
+ e.filename or
filenam
|
5d1f9d3eaa27c0abf555fe3c79e9c11f9f7167ae | Fix redundant warning about 'file_name' config value. | foliant/pandoc.py | foliant/pandoc.py | """Wrapper around Pandoc. Used by builder."""
from __future__ import print_function
import subprocess
from . import gitutils
PANDOC_PATH = "pandoc"
FROM_PARAMS = "-f markdown_strict+simple_tables+multiline_tables+grid_tables+pipe_tables+table_captions+fenced_code_blocks+line_blocks+definition_lists+all_symbols_escapable+strikeout+superscript+subscript+lists_without_preceding_blankline+implicit_figures+raw_tex+citations+tex_math_dollars+header_attributes+auto_identifiers+startnum+footnotes+inline_notes+fenced_code_attributes+intraword_underscores+escaped_line_breaks"
LATEX_PARAMS = "--no-tex-ligatures --smart --normalize --listings --latex-engine=xelatex"
def generate_variable(key, value):
"""Generate a ``--variable key=value`` entry."""
return '--variable "%s"="%s"' % (key, value)
def generate_command(params, output_file, src_file, cfg):
"""Generate the entire Pandoc command with params to invoke."""
params = ["-o " + output_file, FROM_PARAMS, LATEX_PARAMS, params]
for key, value in cfg.items():
if key in ("title", "second_title", "year", "date", "title_page", "tof", "toc"):
params.append(generate_variable(key, value))
elif key == "template":
params.append('--template="%s.tex"' % value)
elif key == "lang":
if value in ("russian", "english"):
params.append(generate_variable(value, "true"))
else:
params.append(generate_variable("russian", "true"))
elif key == "version":
if value == "auto":
params.append(generate_variable(key, gitutils.get_version()))
else:
params.append(generate_variable(key, value))
elif key == "company":
if value in ("restream", "undev"):
params.append(generate_variable(value, "true"))
else:
raise RuntimeError("Unsupported company: %s" % value)
elif key in ("type", "alt_doc_type"):
if value:
params.append(generate_variable(key, value))
elif key == "filters":
for filt in value:
params.append("-F %s" % filt)
else:
print("Unsupported config key: %s" % key)
return ' '.join([PANDOC_PATH] + params + [src_file])
def run(command, src_dir):
"""Invoke the Pandoc executable with the generated params."""
print("Baking output... ", end='')
try:
proc = subprocess.check_output(
command,
cwd=src_dir,
stderr=subprocess.PIPE
)
print("Done!")
except subprocess.CalledProcessError as e:
quit(e.stderr.decode())
def to_pdf(src_file, output_file, tmp_path, cfg):
"""Convert Markdown to PDF via Pandoc."""
pandoc_command = generate_command(
"-t latex",
output_file,
src_file,
cfg
)
run(pandoc_command, tmp_path)
def to_docx(src_file, output_file, tmp_path, cfg):
"""Convert Markdown to Docx via Pandoc."""
pandoc_command = generate_command(
'--reference-docx="ref.docx"',
output_file,
src_file,
cfg
)
run(pandoc_command, tmp_path)
def to_tex(src_file, output_file, tmp_path, cfg):
"""Convert Markdown to TeX via Pandoc."""
pandoc_command = generate_command(
"-t latex",
output_file,
src_file,
cfg
)
run(pandoc_command, tmp_path)
| Python | 0 | @@ -2180,16 +2180,66 @@
%25 filt)%0A
+ elif key == %22file_name%22:%0A pass%0A
|
a324051e28d359a1591dff48fa4bbb32c3caf44a | add loaders __doc__ | src/loaders/__init__.py | src/loaders/__init__.py | # -*- coding: utf-8 -*-
# Copyright (c) 2018 shmilee
import os
from ..glogger import getGLogger
from . import base
__all__ = ['get_rawloader', 'is_rawloader', 'get_pckloader', 'is_pckloader']
log = getGLogger('L')
rawloader_names = ['DirRawLoader', 'TarRawLoader', 'SftpRawLoader']
pckloader_names = ['CachePckLoader', 'NpzPckLoader', 'Hdf5PckLoader']
pckloader_types = ['.cache', '.npz', '.hdf5']
def get_rawloader(path, filenames_filter=None):
'''
Given a path, return a raw loader instance.
Raises IOError if path not found, ValueError if path type not supported.
Notes
-----
*path* types:
1. local directory
2. tar archive file
3. directory in remote SSH server
format: 'sftp://username[:passwd]@host[:port]##remote/path'
'''
path = str(path)
if os.path.isdir(path):
from .dirraw import DirRawLoader
loader = DirRawLoader(path, filenames_filter=filenames_filter)
elif os.path.isfile(path):
import tarfile
if tarfile.is_tarfile(path):
from .tarraw import TarRawLoader
loader = TarRawLoader(path, filenames_filter=filenames_filter)
else:
raise ValueError(
"Unsupported File '%s'! Try with an tar archive!" % path)
elif path.startswith('sftp://'):
from .sftpraw import SftpRawLoader
loader = SftpRawLoader(path, filenames_filter=filenames_filter)
else:
raise IOError("Can't find path '%s'!" % path)
return loader
def is_rawloader(obj):
'''
Return True if obj is a raw loader instance, else return False.
'''
return isinstance(obj, base.BaseRawLoader)
def get_pckloader(path, datagroups_filter=None):
'''
Given a file or cache path, return a pickled loader instance.
Raises IOError if path not found, ValueError if path type not supported.
Notes
-----
*path* types:
1. '.npz' file
2. '.hdf5' file
3. dict object
'''
if isinstance(path, str) and os.path.isfile(path):
ext = os.path.splitext(path)[1]
if ext == '.npz':
from .npzpck import NpzPckLoader
loader = NpzPckLoader(path, datagroups_filter=datagroups_filter)
elif ext == '.hdf5':
from .hdf5pck import Hdf5PckLoader
loader = Hdf5PckLoader(path, datagroups_filter=datagroups_filter)
else:
raise ValueError('Unsupported Filetype: "%s"! '
'Did you mean one of: "%s"?'
% (ext, ', '.join(pckloader_types[1:])))
elif isinstance(path, dict):
from .cachepck import CachePckLoader
loader = CachePckLoader(path, datagroups_filter=datagroups_filter)
else:
raise IOError("Can't find path '%s'!" % path)
return loader
def is_pckloader(obj):
'''
Return True if obj is a pickled loader instance, else return False.
'''
return isinstance(obj, base.BasePckLoader)
| Python | 0.000002 | @@ -48,16 +48,904 @@
hmilee%0A%0A
+'''%0AThis is the subpackage %60%60loaders%60%60 of gdpy3.%0AIt contains two kinds of loaders.%0A%0A1. %60%60RawLoader%60%60, get by :func:%60get_rawloader%60.%0A %60%60RawLoader%60%60 has attributes%0A :attr:%60base.BaseRawLoader.path%60%60,%0A :attr:%60base.BaseRawLoader.filenames%60%0A and methods%0A :meth:%60base.BaseRawLoader.keys%60,%0A :meth:%60base.BaseRawLoader.get%60,%0A :meth:%60base.BaseLoader.find%60,%0A :meth:%60base.BaseLoader.all_in_loader%60.%0A%0A2. %60%60PckLoader%60%60, get by :func:%60get_pckloader%60.%0A %60%60PckLoader%60%60 has attributes%0A :attr:%60base.BasePckLoader.path%60%60,%0A :attr:%60base.BasePckLoader.datakeys%60,%0A :attr:%60base.BasePckLoader.datagroups%60,%0A :attr:%60base.BasePckLoader.description%60,%0A :attr:%60base.BasePckLoader.cache%60,%0A and methods%0A :meth:%60base.BasePckLoader.keys%60,%0A :meth:%60base.BasePckLoader.get%60,%0A :meth:%60base.BasePckLoader.get_many%60,%0A :meth:%60base.BaseLoader.find%60,%0A :meth:%60base.BaseLoader.all_in_loader%60.%0A'''%0A%0A
import o
@@ -2626,21 +2626,26 @@
ile
-or cache path
+path or dict cache
, re
|
bfe6c752aa2a95cc28109f4819cf6a9e88e7ee4b | remove unnecessary comments | stores/views.py | stores/views.py | from newt.views import JSONRestView
from common.response import json_response
from django.conf import settings
import json
store_adapter = __import__(settings.NEWT_CONFIG['ADAPTERS']['STORES'], globals(), locals(), ['adapter'], -1)
import logging
logger = logging.getLogger(__name__)
class StoresRootView(JSONRestView):
def get(self, request):
logger.debug("Entering %s:%s" % (self.__class__.__name__, __name__))
return store_adapter.get_stores()
def post(self, request):
# Temporary: creates a store with a random name
# TODO: create a store with a given name
import uuid
import random
import string
rand_key = random.choice(string.ascii_letters) + str(uuid.uuid4())[0:8]
while(rand_key in store_adapter.get_stores()):
rand_key = str(uuid.uuid4())[0:8]
# TODO: parse post body for initial datas
if request.POST.get("data", False):
return store_adapter.create_store(rand_key, [request.POST.get("data")])
else:
return store_adapter.create_store(rand_key)
class StoresView(JSONRestView):
def get(self, request, store_name):
try:
if request.GET.get("query", False):
data = store_adapter.query_store(store_name, request.GET.get("query"))
else:
data = store_adapter.get_store_contents(store_name)
except Exception as e:
logger.error("Invalid store requested: %s", store_name)
return json_response(status="ERROR", status_code=500, error=e.args[0])
return data
def post(self, request, store_name):
if store_name in store_adapter.get_stores():
if request.POST.get("data", False):
data = request.POST.get("data")
return store_adapter.store_insert(store_name, data)
else:
return json_response(status="ERROR", status_code=500, error="No data recieved.")
else:
if request.POST.get("data", False):
return store_adapter.create_store(store_name, [request.POST.get("data")])
else:
return store_adapter.create_store(store_name)
def put(self, request, store_name):
data = json.loads(request.read())
store_adapter.update(store_name, data)
def delete(self, request, store_name):
return store_adapter.delete_store(store_name)
class StoresObjView(JSONRestView):
def get(self, request, store_name, obj_id):
try:
return store_adapter.store_get_obj(store_name, obj_id)
except Exception as e:
return json_response(status="ERROR", status_code="500", error=e.args[0])
def put(self, request, store_name, obj_id):
from django.http import QueryDict
body = QueryDict(request.body)
if body.get("data", False):
data = body.get("data")
return store_adapter.store_update(store_name, obj_id, data)
else:
return json_response(status="ERROR", status_code=500, error="No data recieved.") | Python | 0 | @@ -554,59 +554,32 @@
name
-%0A%0A # TODO: create a store with a given name%0A
+ if no name is specified
%0A
@@ -826,59 +826,8 @@
8%5D%0A%0A
- # TODO: parse post body for initial datas%0A%0A
@@ -2365,16 +2365,17 @@
_name)%0A%0A
+%0A
class St
|
d97bb53f74c11b654f506f7e14342e7b3582a4c4 | Fix duplicate test method names. | eliot/tests/test_api.py | eliot/tests/test_api.py | """
Tests for the public API exposed by L{eliot}.
"""
from __future__ import unicode_literals
from unittest import TestCase
from .._output import Logger
import eliot
class PublicAPITests(TestCase):
"""
Tests for the public API.
"""
def test_addDestination(self):
"""
L{eliot.addDestination} adds destinations to the L{Destinations}
attached to L{Logger}.
"""
self.assertEqual(eliot.addDestination, Logger._destinations.add)
def test_removeDestination(self):
"""
L{eliot.addDestination} removes destinations from the L{Destinations}
attached to L{Logger}.
"""
self.assertEqual(eliot.removeDestination, Logger._destinations.remove)
def test_removeDestination(self):
"""
L{eliot.addGlobalFields} calls the corresponding method on the
L{Destinations} attached to L{Logger}.
"""
self.assertEqual(eliot.addGlobalFields,
Logger._destinations.addGlobalFields)
class PEP8Tests(TestCase):
"""
Tests for the PEP 8 variant of the the public API.
"""
def test_add_destination(self):
"""
L{eliot.addDestionation} is the same as L{eliot.add_destination}.
"""
self.assertIs(eliot.add_destination, eliot.addDestination)
def test_remove_destination(self):
"""
L{eliot.removeDestionation} is the same as L{eliot.remove_destination}.
"""
self.assertIs(eliot.remove_destination, eliot.removeDestination)
def test_add_global_fields(self):
"""
L{eliot.add_global_fields} is the same as L{eliot.addGlobalFields}.
"""
self.assertIs(eliot.add_global_fields, eliot.addGlobalFields)
def test_write_traceback(self):
"""
L{eliot.writeTraceback} is the same as L{eliot.write_traceback}.
"""
self.assertIs(eliot.write_traceback, eliot.writeTraceback)
def test_write_failure(self):
"""
L{eliot.writeFailure} is the same as L{eliot.write_failure}.
"""
self.assertIs(eliot.write_failure, eliot.writeFailure)
def test_start_task(self):
"""
L{eliot.startTask} is the same as L{eliot.start_task}.
"""
self.assertIs(eliot.start_task, eliot.startTask)
def test_start_action(self):
"""
L{eliot.startAction} is the same as L{eliot.start_action}.
"""
self.assertIs(eliot.start_action, eliot.startAction)
| Python | 0.000013 | @@ -745,33 +745,31 @@
ef test_
-removeDestination
+addGlobalFields
(self):%0A
|
be0b85f50b8cd4f7323d5c6def5c388c7a8fad36 | fix webhook | webhooks.py | webhooks.py | from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import os
import shutil
from episode import GitRepo, Episode
WORK_DIR = "repo"
class WebHookHandler(BaseHTTPRequestHandler):
def do_POST(self):
event_type = self.headers.get('X-Github-Event')
if event_type != 'push':
return
length = int(self.headers.get('Content-Length'))
http_body = self.rfile.read(length).decode('utf-8')
data = json.loads(http_body)
ref = data.get('ref')
if ref != 'refs/heads/source':
return
# todo: pull repo & branch to source & build & push to master
repo_addr = data.get("repository")['ssh_url']
print('repo', repo_addr)
repo = GitRepo(repo_address=repo_addr, dst=WORK_DIR)
repo.clone()
os.chdir(WORK_DIR)
repo.checkout_or_create("source")
Episode().deploy()
shutil.rmtree(WORK_DIR)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# self.wfile.write(bytes("Hello World", "utf-8"))
return
if __name__ == "__main__":
port = 8000
Handler = WebHookHandler
httpd = HTTPServer(("0.0.0.0", port), Handler)
print("Serving at http://127.0.0.1:{port}".format(port=port))
httpd.serve_forever() | Python | 0.000017 | @@ -904,16 +904,38 @@
eploy()%0A
+ os.chdir(%22..%22)
%0A
|
7581bd9b435c53b09d8ecb7683fd63b5d1399a2e | Switch to the next dev version. | knossos/center.py | knossos/center.py | ## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import os
import sys
import json
from . import uhf
uhf(__name__)
from .qt import QtCore # noqa
# The version should follow the http://semver.org guidelines.
# Only remove the -dev tag if you're making a release!
VERSION = '0.12.0-dev'
UPDATE_LINK = 'https://fsnebula.org/knossos'
INNOEXTRACT_LINK = 'https://fsnebula.org/storage/knossos/innoextract.json'
DEBUG = os.getenv('KN_DEBUG', '0').strip() == '1'
SENTRY_DSN = 'https://77179552b41946488346a9a2d2669d74:[email protected]/9?timeout=5'
LANGUAGES = {
'en': 'English'
}
app = None
main_win = None
fs2_watcher = None
pmaster = None
auto_fetcher = None
mods = None
installed = None
fso_flags = None
has_retail = None
raven = None
raven_handler = None
settings = {
'fs2_bin': None,
'fred_bin': None,
'base_path': None,
'base_dirs': [],
'custom_bar': True,
'hash_cache': None,
'max_downloads': 3,
'download_bandwidth': -1.0, # negative numbers are used to specify no limit
'repos': [('https://fsnebula.org/storage/repo.json', 'FSNebula')],
'nebula_link': 'https://fsnebula.org/api/1/',
'nebula_web': 'https://fsnebula.org/',
'update_notify': True,
'use_raven': True,
'sdl2_path': None,
'openal_path': None,
'language': None,
'neb_user': '',
'neb_password': '',
'engine_stability': 'stable',
'fso_flags': {},
'joystick': {
'guid': None,
'id': 99999
}
}
if sys.platform.startswith('win'):
settings_path = os.path.expandvars('$APPDATA/knossos')
elif 'XDG_CONFIG_HOME' in os.environ or sys.platform.startswith('linux'):
config_home = os.environ.get('XDG_CONFIG_HOME', '')
if config_home == '':
# As specified by the XDG Base Directory Specification this should be the default
config_home = os.path.expandvars('$HOME/.config')
settings_path = os.path.join(config_home, 'knossos')
old_path = os.path.expandvars('$HOME/.knossos')
if not os.path.isdir(settings_path) and os.path.isdir(old_path):
settings_path = old_path
del old_path, config_home
elif sys.platform == 'darwin':
old_path = os.path.expandvars('$HOME/.knossos')
settings_path = os.path.expandvars('$HOME/Library/Preferences/knossos')
if not os.path.isdir(settings_path) and os.path.isdir(old_path):
settings_path = old_path
del old_path
else:
settings_path = os.path.expanduser('~/.knossos')
class _SignalContainer(QtCore.QObject):
update_avail = QtCore.Signal('QVariant')
task_launched = QtCore.Signal(QtCore.QObject)
signals = _SignalContainer()
def save_settings():
settings['hash_cache'] = dict()
# Other threads might be using the hash cache. Make a local copy to avoid problems.
for path, info in list(util.HASH_CACHE.items()):
# Skip deleted files
if os.path.exists(path):
settings['hash_cache'][path] = info
with open(os.path.join(settings_path, 'settings.json'), 'w', errors='replace') as stream:
json.dump(settings, stream)
from . import util # noqa
| Python | 0 | @@ -887,17 +887,17 @@
N = '0.1
-2
+4
.0-dev'%0A
|
ae99e958a6f1b021e445a0ba4c665df4be13671d | Load config from file system | wikibugs.py | wikibugs.py | #!/usr/bin/env python
from dogpile.cache import make_region
import phabricator
import configfetcher
import rqueue
conf = configfetcher.ConfigFetcher()
redis_region = make_region().configure(
'dogpile.cache.redis',
arguments={
'host': conf.get('REDIS_HOST'),
'port': 6379,
'db': 0,
'redis_expiration_time': 60*60*2, # 2 hours
'distributed_lock': True
}
)
mem_region = make_region().configure(
'dogpile.cache.memory'
)
class Wikibugs2(object):
def __init__(self, conf):
"""
:param conf: Config
:type conf: configfetcher.ConfigFetcher
"""
self.conf = conf
self.phab = phabricator.Phabricator(
self.conf.get('PHAB_HOST'),
self.conf.get('PHAB_USER'),
self.conf.get('PHAB_CERT')
)
self.rqueue = rqueue.RedisQueue(
conf.get('REDIS_QUEUE_NAME'),
conf.get('REDIS_HOST')
)
@mem_region.cache_on_arguments()
@redis_region.cache_on_arguments()
def get_user_name(self, phid):
"""
:param phid: A PHID- thingy representing a user
:type phid: basestring
"""
info = self.phab.request('user.query', {
'phids': [phid]
})
return info[0]['userName']
def get_project_name(self, phid):
return self.cached_phid_info(phid)['name']
def poll(self):
events = self.phab.request('feed.query', {
'view': 'data'
})
for event in events.values():
self.process_event(event)
def phid_info(self, phid):
info = self.phab.request('phid.query', {
'phids': [phid]
})
return info.values()[0]
def maniphest_info(self, task_id):
"""
:param task_id: T###
:type task_id: basestring
"""
task_id = int(task_id[1:])
info = self.phab.request('maniphest.info', {
'task_id': task_id
})
return info
@mem_region.cache_on_arguments()
@redis_region.cache_on_arguments()
def cached_phid_info(self, phid):
"""
Same thing as phid_info, but
cached
"""
return self.phid_info(phid)
def get_transaction_info(self, task_id, timestamp):
"""
:param task_id: T###
:type task_id: basestring
:param timestamp: "epoch timestamp"
:type timestamp: basestring
"""
task_id = int(task_id[1:])
info = self.phab.request('maniphest.gettasktransactions', {
'ids': [task_id]
})
transactions = {}
for trans in info.values()[0]:
if trans['dateCreated'] == timestamp: # Yeah, this is a hack, but it works
transactions[trans['transactionType']] = {
'old': trans['oldValue'],
'new': trans['newValue'],
}
if trans['comments'] is not None:
transactions[trans['transactionType']]['comments'] = trans['comments']
return transactions
def process_event(self, event_info):
"""
:type event_info: dict
"""
timestamp = str(event_info['epoch'])
phid_info = self.phid_info(event_info['data']['objectPHID'])
if phid_info['type'] != 'TASK': # Only handle Maniphest Tasks for now
return
if phid_info['uri'] != 'https://phab-01.wmflabs.org/T84':
return
task_info = self.maniphest_info(phid_info['name'])
# Start sorting this into things we care about...
useful_event_metadata = {
'url': phid_info['uri'],
'projects': [self.get_project_name(phid) for phid in task_info['projectPHIDs']],
'user': self.get_user_name(event_info['authorPHID']),
}
transactions = self.get_transaction_info(phid_info['name'], timestamp)
if 'ccs' in transactions and len(transactions) == 1:
# Ignore any only-CC updates
return
if 'title' in transactions:
useful_event_metadata['title'] = transactions['title']['new']
if transactions['title']['old'] is None:
useful_event_metadata['new'] = True
else:
# Technically there's a race condition if the title is changed
# in another event before our API request is made, but meh
# Name is in the format "T123: FooBar", so get rid of the prefix
useful_event_metadata['title'] = phid_info['fullName'].split(':', 1)[1].strip()
if 'core:comment' in transactions:
useful_event_metadata['comment'] = transactions['core:comment'].get('comments', 'Removed.')
for _type in ['status', 'priority']:
if _type in transactions:
useful_event_metadata[_type] = transactions[_type]
if 'reassign' in transactions:
trans = transactions['reassign']
info = {}
for _type in ['old', 'new']:
if trans[_type] is not None:
info[_type] = self.get_user_name(trans[_type])
else:
info[_type] = None
useful_event_metadata['assignee'] = info
print useful_event_metadata
self.rqueue.put(useful_event_metadata)
if __name__ == '__main__':
bugs = Wikibugs2(conf)
bugs.poll()
| Python | 0 | @@ -16,16 +16,38 @@
python%0A%0A
+import os%0Aimport json%0A
from dog
@@ -5394,16 +5394,162 @@
ain__':%0A
+ conf_path = os.path.join(os.path.dirname(__file__), 'config.json')%0A with open(conf_path) as conf_file:%0A conf = json.load(conf_file)%0A
bugs
|
4787c9e1b895b5ce0bdd0fedeb537a971fab5933 | add management command to benchmark get_direct_ccz | corehq/apps/app_manager/management/commands/benchmark_direct_ccz.py | corehq/apps/app_manager/management/commands/benchmark_direct_ccz.py | Python | 0.000001 | @@ -0,0 +1,921 @@
+from __future__ import absolute_import%0Afrom __future__ import print_function%0Afrom __future__ import unicode_literals%0A%0Aimport json%0A%0Afrom django.core.management import BaseCommand%0A%0Afrom corehq.apps.app_manager.dbaccessors import get_app%0Afrom corehq.apps.app_manager.management.commands.benchmark_build_times import Timer%0Afrom corehq.apps.app_manager.views.cli import get_direct_ccz%0A%0A%0Aclass Command(BaseCommand):%0A%0A def add_arguments(self, parser):%0A parser.add_argument(%0A 'domain_app_id_pairs',%0A help='A JSON list where each element has the format %5B%3Cdomain%3E, %3Capp_id%3E%5D',%0A type=json.loads,%0A )%0A%0A def handle(self, domain_app_id_pairs, **options):%0A for (domain, app_id) in domain_app_id_pairs:%0A print(%22%25s: %25s%22 %25 (domain, app_id))%0A with Timer():%0A app = get_app(domain, app_id)%0A get_direct_ccz(domain, app, None, None)%0A
|
|
db356499cf079ec9284baf16817d3c3054d8688d | Add source_added tests | tests/integration/states/test_chocolatey.py | tests/integration/states/test_chocolatey.py | # -*- coding: utf-8 -*-
"""
Tests for the Chocolatey State
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.utils.platform
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
__testcontext__ = {}
@destructiveTest
@skipIf(not salt.utils.platform.is_windows(), "Windows Specific Test")
class ChocolateyTest(ModuleCase, SaltReturnAssertsMixin):
"""
Chocolatey State Tests
    These tests are destructive as they install and remove software
"""
def setUp(self):
"""
Ensure that Chocolatey is installed
"""
super(ChocolateyTest, self).setUp()
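        # The module-level __testcontext__ dict is used so that the Chocolatey
        # bootstrap below only runs once for the whole test module.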
if "chocolatey" not in __testcontext__:
self.run_function("chocolatey.bootstrap")
__testcontext__["chocolatey"] = True
def test_chocolatey(self):
"""
Test the following:
- `chocolatey.installed`
- `chocolatey.upgraded`
- `chocolatey.uninstalled`
"""
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so this package
# needs to NOT be installed before we run the states below
target = "firefox"
pre_version = "52.0.2"
upg_version = "57.0.2"
log.debug("Making sure %s is not installed", target)
self.assertFalse(self.run_function("chocolatey.version", [target]))
try:
####################################################
# Test `chocolatey.installed`
####################################################
# Install the package
log.debug("Testing chocolatey.installed")
ret = self.run_state(
"chocolatey.installed", name=target, version=pre_version
)
self.assertSaltTrueReturn(ret)
# Verify the package is installed
log.debug("Verifying install success")
ret = self.run_function("chocolatey.version", [target])
self.assertEqual(ret, {"Firefox": [pre_version]})
####################################################
# Test `chocolatey.upgraded`
####################################################
# Upgrade the package
log.debug("Testing chocolatey.upgraded")
ret = self.run_state(
"chocolatey.upgraded", name=target, version=upg_version
)
self.assertSaltTrueReturn(ret)
# Verify the package is upgraded
log.debug("Verifying upgrade success")
ret = self.run_function("chocolatey.version", [target])
self.assertEqual(ret, {"Firefox": [upg_version]})
####################################################
# Test `chocolatey.uninstalled`
####################################################
# uninstall the package
log.debug("Testing chocolatey.uninstalled")
ret = self.run_state("chocolatey.uninstalled", name=target)
self.assertSaltTrueReturn(ret)
# Verify the package is uninstalled
log.debug("Verifying uninstall success")
ret = self.run_function("chocolatey.version", [target])
self.assertEqual(ret, {})
finally:
# Always uninstall
log.debug("Uninstalling %s", target)
self.run_function("chocolatey.uninstall", [target])
| Python | 0.000001 | @@ -1191,32 +1191,68 @@
ey.uninstalled%60%0A
+ - %60chocolatey.source_added%60%0A
%22%22%22%0A
@@ -1558,24 +1558,113 @@
= %2257.0.2%22%0A
+ src_name = %22test_repo%22%0A src_location = %22https://repo.test.com/chocolatey%22%0A
log.
@@ -3690,24 +3690,648 @@
l(ret, %7B%7D)%0A%0A
+ ####################################################%0A # Test %60chocolatey.source_added%60%0A ####################################################%0A # add the source%0A log.debug(%22Testing chocolatey.source_added%22)%0A ret = self.run_state(%22chocolatey.source_added%22, name=src_name, source_location=src_location)%0A self.assertSaltTrueReturn(ret)%0A%0A # Verify the source is added%0A log.debug(%22Verifying source_add success%22)%0A ret = self.run_function(%22chocolatey.list_sources%22)%0A self.assertTrue(source_name in ret.keys())%0A%0A
fina
|
585317f3a03f55f6487a98446d4a9279f91714d2 | Add a test of the linearity of scalar multiplication | tests/test_vector2_scalar_multiplication.py | tests/test_vector2_scalar_multiplication.py | import pytest # type: ignore
from hypothesis import given
from hypothesis.strategies import floats
from utils import vectors
from ppb_vector import Vector2
@pytest.mark.parametrize("x, y, expected", [
(Vector2(6, 1), 0, Vector2(0, 0)),
(Vector2(6, 1), 2, Vector2(12, 2)),
(Vector2(0, 0), 3, Vector2(0, 0)),
(Vector2(-1.5, 2.4), -2, Vector2(3.0, -4.8)),
(Vector2(1, 2), 0.1, Vector2(0.1, 0.2))
])
def test_scalar_multiplication(x, y, expected):
assert x * y == expected
@given(
x=floats(min_value=-1e75, max_value=1e75),
y=floats(min_value=-1e75, max_value=1e75),
v=vectors(max_magnitude=1e150)
)
def test_scalar_associative(x: float, y: float, v: Vector2):
left = (x * y) * v
right = x * (y * v)
assert left.isclose(right)
| Python | 0.002828 | @@ -771,8 +771,242 @@
(right)%0A
+%0A@given(%0A l=floats(min_value=-1e150, max_value=1e150),%0A x=vectors(max_magnitude=1e150),%0A y=vectors(max_magnitude=1e150),%0A)%0Adef test_scalar_linear(l: float, x: Vector2, y: Vector2):%0A assert (l * (x + y)).isclose(l*x + l*y)%0A
|
cecbb5951ef806c5b4b7b6894c05e4d086730fb0 | order fy descending (newest on top) | base_ordered/ordered.py | base_ordered/ordered.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 Camptocamp Austria (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class purchase_order(osv.osv):
_inherit = "purchase.order"
_order = 'date_order desc, id desc'
purchase_order()
class sale_order(osv.osv):
_inherit = "sale.order"
_order = 'date_order desc, id desc'
sale_order()
class stock_picking(osv.osv):
_inherit = "stock.picking"
_order = 'date desc, id desc'
stock_picking()
class stock_move(osv.osv):
_inherit = "stock.move"
_order = 'date desc, id desc'
stock_move()
class account_invoice(osv.osv):
_inherit = "account.invoice"
_order = 'date_invoice desc, id desc'
account_invoice()
| Python | 0 | @@ -1652,12 +1652,146 @@
_invoice()%0A%0A
+class account_fiscalyear(osv.osv):%0A _inherit = %22account.fiscalyear%22%0A _order = 'date_start desc, id desc'%0A%0Aaccount_fiscalyear()%0A%0A
|
42ca6c8bfc2b7598acd880a013f7898db5245004 | Add dependency prefixes to CMAKE_PREFIX_PATH | lib/spack/spack/build_environment.py | lib/spack/spack/build_environment.py | """
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
   the package's module scope.  This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
   readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import os
import shutil
import multiprocessing
import platform
from llnl.util.filesystem import *
import spack
import spack.compilers as compilers
from spack.util.executable import Executable, which
from spack.util.environment import *
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_LIB = 'SPACK_LIB'
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_DEPENDENCIES = 'SPACK_DEPENDENCIES'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SPEC = 'SPACK_SPEC'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can
specify parallel or not on a per-invocation basis. Using
'parallel' as a kwarg will override whatever the package's
global setting is, so you can either default to true or false
and override particular calls.
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
"""
def __init__(self, name, parallel):
super(MakeExecutable, self).__init__(name)
self.parallel = parallel
def __call__(self, *args, **kwargs):
parallel = kwargs.get('parallel', self.parallel)
disable_parallel = env_flag(SPACK_NO_PARALLEL_MAKE)
if parallel and not disable_parallel:
jobs = "-j%d" % multiprocessing.cpu_count()
args = (jobs,) + args
super(MakeExecutable, self).__call__(*args, **kwargs)
def set_compiler_environment_variables(pkg):
assert(pkg.spec.concrete)
compiler = compilers.compiler_for_spec(pkg.spec.compiler)
# Set compiler variables used by CMake and autotools
os.environ['CC'] = 'cc'
os.environ['CXX'] = 'c++'
os.environ['F77'] = 'f77'
os.environ['FC'] = 'f90'
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
os.environ['SPACK_CC'] = compiler.cc
if compiler.cxx:
os.environ['SPACK_CXX'] = compiler.cxx
if compiler.f77:
os.environ['SPACK_F77'] = compiler.f77
if compiler.fc:
os.environ['SPACK_FC'] = compiler.fc
os.environ['SPACK_COMPILER_SPEC'] = str(pkg.spec.compiler)
def set_build_environment_variables(pkg):
"""This ensures a clean install environment when we build packages.
"""
# This tells the compiler script where to find the Spack installation.
os.environ[SPACK_LIB] = spack.lib_path
# Add spack build environment path with compiler wrappers first in
# the path. We handle case sensitivity conflicts like "CC" and
# "cc" by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = [spack.build_env_path,
join_path(spack.build_env_path, 'case-insensitive')]
path_put_first("PATH", env_paths)
path_set(SPACK_ENV_PATH, env_paths)
# Prefixes of all of the package's dependencies go in
# SPACK_DEPENDENCIES
dep_prefixes = [d.package.prefix for d in pkg.spec.dependencies.values()]
path_set(SPACK_DEPENDENCIES, dep_prefixes)
# Install prefix
os.environ[SPACK_PREFIX] = pkg.prefix
    # Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
pop_keys(os.environ, "LD_LIBRARY_PATH", "LD_RUN_PATH", "DYLD_LIBRARY_PATH")
# Add bin directories from dependencies to the PATH for the build.
bin_dirs = ['%s/bin' % prefix for prefix in dep_prefixes]
path_put_first('PATH', [bin for bin in bin_dirs if os.path.isdir(bin)])
# Working directory for the spack command itself, for debug logs.
if spack.debug:
os.environ[SPACK_DEBUG] = "TRUE"
os.environ[SPACK_SPEC] = str(pkg.spec)
os.environ[SPACK_DEBUG_LOG_DIR] = spack.spack_working_dir
def set_module_variables_for_package(pkg):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
m = pkg.module
m.make = MakeExecutable('make', pkg.parallel)
m.gmake = MakeExecutable('gmake', pkg.parallel)
# number of jobs spack prefers to build with.
m.make_jobs = multiprocessing.cpu_count()
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable('./configure')
# TODO: shouldn't really use "which" here. Consider adding notion
# TODO: of build dependencies, as opposed to link dependencies.
# TODO: Currently, everything is a link dependency, but tools like
# TODO: this shouldn't be.
m.cmake = which("cmake")
# standard CMake arguments
m.std_cmake_args = ['-DCMAKE_INSTALL_PREFIX=%s' % pkg.prefix,
'-DCMAKE_BUILD_TYPE=None']
if platform.mac_ver()[0]:
m.std_cmake_args.append('-DCMAKE_FIND_FRAMEWORK=LAST')
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
m.mkdir = os.mkdir
m.makedirs = os.makedirs
m.remove = os.remove
m.removedirs = os.removedirs
m.mkdirp = mkdirp
m.install = install
m.rmtree = shutil.rmtree
m.move = shutil.move
# Useful directories within the prefix are encapsulated in
# a Prefix object.
m.prefix = pkg.prefix
| Python | 0.000002 | @@ -5354,16 +5354,187 @@
ng_dir%0A%0A
+ # Add dependencies to CMAKE_PREFIX_PATH%0A dep_prefixes = %5Bd.package.prefix for d in pkg.spec.dependencies.values()%5D%0A path_set(%22CMAKE_PREFIX_PATH%22, dep_prefixes)%0A%0A
%0Adef set
|
f13009ad215c570810abfa2275a0d04abae6d37e | Use server side searches. | wraptool.py | wraptool.py | #!/usr/bin/env python3
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.request, json
import sys, os
import configparser
import shutil
from glob import glob
API_ROOT = 'http://wrapdb.mesonbuild.com/v1/'
help_templ = '''This program allows you to manage your Wrap dependencies
using the online wrap database http://wrapdb.mesonbuild.com.
Run this command in your top level source directory.
Usage:
%s <command> [options]
Commands:
list - show all available projects
search - search the db by name
install - install the specified project
update - update the project to its newest available release
info - show available versions of a project
status - show installed and available versions of your projects
'''
def print_help():
print(help_templ % sys.argv[0])
def get_result(urlstring):
u = urllib.request.urlopen(urlstring)
data = u.read().decode('utf-8')
jd = json.loads(data)
if jd['output'] != 'ok':
print('Got bad output from server.')
print(data)
sys.exit(1)
return jd
def get_projectlist():
jd = get_result(API_ROOT + 'projects')
projects = jd['projects']
return projects
def list_projects():
projects = get_projectlist()
for p in projects:
print(p)
def search(name):
# FIXME, add search to server side
projects = get_projectlist()
for p in projects:
if name in p:
print(p)
def get_latest_version(name):
# FIXME, use server's newest logic once it is working.
jd = get_result(API_ROOT + 'query/get_latest/' + name)
branch = jd['branch']
revision = jd['revision']
return (branch, revision)
def install(name):
if not os.path.isdir('subprojects'):
print('Subprojects dir not found. Run this script in your source root directory.')
sys.exit(1)
if os.path.isdir(os.path.join('subprojects', name)):
print('Subproject directory for this project already exists.')
sys.exit(1)
wrapfile = os.path.join('subprojects', name + '.wrap')
if os.path.exists(wrapfile):
print('Wrap file already exists.')
sys.exit(1)
(branch, revision) = get_latest_version(name)
u = urllib.request.urlopen(API_ROOT + 'projects/%s/%s/%s/get_wrap' % (name, branch, revision))
data = u.read()
open(wrapfile, 'wb').write(data)
print('Installed', name, 'branch', branch, 'revision', revision)
def get_current_version(wrapfile):
cp = configparser.ConfigParser()
cp.read(wrapfile)
cp = cp['wrap-file']
patch_url = cp['patch_url']
arr = patch_url.split('/')
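    # patch_url is assumed to end in .../<branch>/<revision>/<filename>, so the
    # branch and revision can be read back from the trailing path components.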
branch = arr[-3]
revision = int(arr[-2])
return (branch, revision, cp['directory'], cp['source_filename'], cp['patch_filename'])
def update(name):
if not os.path.isdir('subprojects'):
print('Subprojects dir not found. Run this command in your source root directory.')
sys.exit(1)
wrapfile = os.path.join('subprojects', name + '.wrap')
if not os.path.exists(wrapfile):
print('Project', name, 'is not in use.')
sys.exit(1)
(branch, revision, subdir, src_file, patch_file) = get_current_version(wrapfile)
(new_branch, new_revision) = get_latest_version(name)
if new_branch == branch and new_revision == revision:
print('Project', name, 'is already up to date.')
sys.exit(0)
u = urllib.request.urlopen(API_ROOT + 'projects/%s/%s/%d/get_wrap' % (name, new_branch, new_revision))
data = u.read()
shutil.rmtree(os.path.join('subprojects', subdir), ignore_errors=True)
try:
os.unlink(os.path.join('subprojects/packagecache', src_file))
except FileNotFoundError:
pass
try:
os.unlink(os.path.join('subprojects/packagecache', patch_file))
except FileNotFoundError:
pass
open(wrapfile, 'wb').write(data)
print('Updated', name, 'to branch', new_branch, 'revision', new_revision)
def info(name):
jd = get_result(API_ROOT + 'projects/' + name)
versions = jd['versions']
if len(versions) == 0:
print('No available versions of', name)
sys.exit(0)
print('Available versions of %s:' % name)
for v in versions:
print(' ', v['branch'], v['revision'])
def status():
print('Subproject status')
for w in glob('subprojects/*.wrap'):
name = os.path.split(w)[1][:-5]
try:
(latest_branch, latest_revision) = get_latest_version(name)
except Exception:
print('', name, 'not available in wrapdb.')
continue
try:
(current_branch, current_revision, _, _, _) = get_current_version(w)
except Exception:
print('Wrap file not from wrapdb.')
continue
if current_branch == latest_branch and current_revision == latest_revision:
print('', name, 'up to date. Branch %s, revision %d.' % (current_branch, current_revision))
else:
print('', name, 'not up to date. Have %s %d, but %s %d is available.' % (current_branch, current_revision, latest_branch, latest_revision))
if __name__ == '__main__':
if len(sys.argv) < 2 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
print_help()
sys.exit(0)
command = sys.argv[1]
args = sys.argv[2:]
if command == 'list':
list_projects()
elif command == 'search':
if len(args) != 1:
print('Search requires exactly one argument.')
sys.exit(1)
search(args[0])
elif command == 'install':
if len(args) != 1:
print('Install requires exactly one argument.')
sys.exit(1)
install(args[0])
elif command == 'update':
if len(args) != 1:
print('update requires exactly one argument.')
sys.exit(1)
update(args[0])
elif command == 'info':
if len(args) != 1:
print('info requires exactly one argument.')
sys.exit(1)
info(args[0])
elif command == 'status':
status()
else:
print('Unknown command', command)
sys.exit(1)
| Python | 0 | @@ -1824,74 +1824,57 @@
-# FIXME, add search to server side%0A projects = get_projectlist(
+jd = get_result(API_ROOT + 'query/byname/' + name
)%0A
@@ -1880,32 +1880,36 @@
for p in
+jd%5B'
projects
:%0A if
@@ -1900,36 +1900,12 @@
ects
+'%5D
:%0A
- if name in p:%0A
@@ -1952,67 +1952,8 @@
e):%0A
- # FIXME, use server's newest logic once it is working.%0A
|
45feb9326f359fb94ebeb17867bcd098ba6fd4b4 | Make sure _start_event and _end_event are initialized | glue_exp/tools/floodfill_selection/floodfill_selection.py | glue_exp/tools/floodfill_selection/floodfill_selection.py | import os
import numpy as np
from glue.viewers.common.qt.mouse_mode import MouseMode
from glue.external.qt import QtGui
from .floodfill_scipy import floodfill_scipy
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.subset import MaskSubsetState
__all__ = ['FloodfillSelectionTool']
ROOT = os.path.dirname(__file__)
WARN_THRESH = 10000000 # warn when floodfilling large images
class FloodfillSelectionTool(object):
def __init__(self, widget=None):
self.widget = widget
self.data_object = None
def _get_modes(self, axes):
self._mode = FloodfillMode(axes,
move_callback=self._floodfill_roi,
release_callback=self._floodfill_roi)
return [self._mode]
# @set_cursor(Qt.WaitCursor)
def _floodfill_roi(self, mode):
"""
Callback for FloodfillMode.
"""
if mode._start_event is None or mode._end_event is None:
return
data = self.widget.client.display_data
att = self.widget.client.display_attribute
if data is None or att is None:
return
if data.size > WARN_THRESH and not self.widget._confirm_large_image(data):
return
# Determine length of dragging action in units relative to the figure
width, height = mode._start_event.canvas.get_width_height()
dx = (mode._end_event.x - mode._start_event.x) / width
dy = (mode._end_event.y - mode._start_event.y) / height
length = np.hypot(dx, dy)
# Make sure the coordinates are converted to the nearest integer
x = int(round(mode._start_event.xdata))
y = int(round(mode._start_event.ydata))
z = int(round(self.widget.client.slice[self.profile_axis]))
        # We convert the length in relative figure units to a threshold - with
        # the formula below, moving by 0.1 produces a threshold of 2, 0.2 -> 11,
        # 0.3 -> 101, etc
threshold = 1 + 10 ** (length / 0.1 - 1)
# coordinate should be integers as index for array
values = np.asarray(data[att], dtype=float)
mask = floodfill_scipy(values, (z, y, x), threshold)
if mask is not None:
cids = data.pixel_component_ids
subset_state = MaskSubsetState(mask, cids)
mode = EditSubsetMode()
mode.update(data, subset_state, focus_data=data)
@property
def profile_axis(self):
slc = self.widget.client.slice
candidates = [i for i, s in enumerate(slc) if s not in ['x', 'y']]
return max(candidates, key=lambda i: self.widget.client.display_data.shape[i])
def _display_data_hook(self, data):
pass
def close(self):
pass
class FloodfillMode(MouseMode):
"""
Creates selection by using the mouse to pick regions using the flood fill
algorithm: https://en.wikipedia.org/wiki/Flood_fill
"""
def __init__(self, *args, **kwargs):
super(FloodfillMode, self).__init__(*args, **kwargs)
self.icon = QtGui.QIcon(os.path.join(ROOT, "glue_floodfill.png"))
self.mode_id = 'Flood fill'
self.action_text = 'Flood fill'
self.tool_tip = ('Define a region of interest with the flood fill '
'algorithm. Click to define the starting pixel and '
'drag (keeping the mouse clicked) to grow the '
'selection.')
def press(self, event):
self._start_event = event
super(FloodfillMode, self).press(event)
def move(self, event):
self._end_event = event
super(FloodfillMode, self).move(event)
def release(self, event):
self._end_event = event
super(FloodfillMode, self).release(event)
self._start_event = None
self._end_event = None
| Python | 0.008759 | @@ -3468,16 +3468,80 @@
ction.')
+%0A self._start_event = None%0A self._end_event = None
%0A%0A de
|
2fedc43c50bd933924046b6f79633687a452116a | bump version | src/mrfitty/__init__.py | src/mrfitty/__init__.py | __version__ = '0.11.0'
| Python | 0 | @@ -11,13 +11,13 @@
_ = '0.1
-1
+2
.0'%0A
|
f48601ceacbf9d05412aa5f45b6d4f9bb46d266e | update the script for GMC | utilities/scripts/correct_momentum_conservation.py | utilities/scripts/correct_momentum_conservation.py | #!/usr/bin/env python3
import sys
from numpy import *
from os import path
def parse_data(data_line):
data = data_line.split()
data = list(map(int, data[:2])) + list(map(float, data[2:]))
return(data)
OSCAR_file_path = str(sys.argv[1])
OSCAR_file = open(OSCAR_file_path, 'r')
output_file = open('OSCAR_w_GMC.DAT', 'w')
line_count = 0
Nparticle = 0
event_header_line = 3
iev = 0
event_data = []
for temp_line in OSCAR_file:
if line_count < 3:
output_file.write(temp_line)
if line_count == event_header_line:
output_file.write(temp_line)
Nparticle = int(temp_line.split()[1])
event_header_line += Nparticle + 1
iev += 1
print("analysis event %d with %d particles" % (iev, Nparticle))
event_data = []
if line_count > (event_header_line - Nparticle - 1):
data = parse_data(temp_line)
event_data.append(data)
if line_count > 3 and line_count == event_header_line - 1:
event_data = array(event_data)
CM_Px = sum(event_data[:, 2])
CM_Py = sum(event_data[:, 3])
CM_Pz = sum(event_data[:, 4])
total_E = sum(event_data[:, 5])
print("total energy = %g GeV" % total_E)
print("correction per particle: delta_px = %g GeV, "
"delta_py = %g GeV, delta_pz = %g GeV"
% (CM_Px/Nparticle, CM_Py/Nparticle, CM_Pz/Nparticle))
event_data[:, 2] -= CM_Px/Nparticle
event_data[:, 3] -= CM_Py/Nparticle
event_data[:, 4] -= CM_Pz/Nparticle
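        # Put each particle back on its mass shell: recompute E from the corrected
        # momentum as sqrt(px^2 + py^2 + pz^2 + m^2), where column 6 holds the mass.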
event_data[:, 5] = sqrt(event_data[:, 2]**2. + event_data[:, 3]**2.
+ event_data[:, 4]**2. + event_data[:, 6]**2.)
for iline in range(Nparticle):
output_file.write(
"%d %d %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n"
% (event_data[iline, 0], event_data[iline, 1],
event_data[iline, 2], event_data[iline, 3],
event_data[iline, 4], event_data[iline, 5],
event_data[iline, 6], event_data[iline, 7],
event_data[iline, 8], event_data[iline, 9],
event_data[iline, 10]))
line_count += 1
OSCAR_file.close()
output_file.close()
| Python | 0 | @@ -14,17 +14,16 @@
v python
-3
%0A%0Aimport
@@ -328,16 +328,44 @@
, 'w')%0A%0A
+a = 2 # w_i = pT%5Ea/%3CpT%5Ea%3E%0A%0A
line_cou
@@ -1156,173 +1156,505 @@
-total_E = sum(event_data%5B:, 5%5D)%0A print(%22total energy = %25g GeV%22 %25 total_E)%0A print(%22correction per particle: delta_px = %25g GeV, %22%0A %22delta_py
+mean_Px = CM_Px/Nparticle%0A mean_Py = CM_Py/Nparticle%0A mean_Pz = CM_Pz/Nparticle%0A pT_array = sqrt(event_data%5B:, 2%5D**2. + event_data%5B:, 3%5D**2.)%0A mean_pT = mean(pT_array)%0A mean_pTsq = mean(pT_array**2.)%0A weight = (pT_array**a)/mean(pT_array**a)%0A total_E = sum(event_data%5B:, 5%5D)%0A print(%22total energy = %25g GeV%22 %25 total_E)%0A print(%22%3Cpx%3E = %25g GeV, %3Cpy%3E = %25g GeV, %3Cpz%3E = %25g GeV%22%0A %25 (mean_Px, mean_Py, mean_Pz))%0A print(%22%3CpT%3E
= %25
@@ -1664,16 +1664,14 @@
eV,
-delta_pz
+%3CpT%5E2%3E
= %25
@@ -1667,32 +1667,34 @@
%3CpT%5E2%3E = %25g GeV
+%5E2
%22%0A
@@ -1700,57 +1700,26 @@
%25 (
-CM_Px/Nparticle, CM_Py/Nparticle, CM_Pz/Nparticle
+mean_pT, mean_pTsq
))%0A
@@ -1745,31 +1745,30 @@
, 2%5D -=
-CM_Px/Nparticle
+mean_Px*weight
%0A
@@ -1788,31 +1788,30 @@
, 3%5D -=
-CM_Py/Nparticle
+mean_Py*weight
%0A
@@ -1831,31 +1831,30 @@
, 4%5D -=
-CM_Pz/Nparticle
+mean_Pz*weight
%0A
@@ -2093,72 +2093,94 @@
%22%25
+10
d %25
+10
d %25
+24
.16e %25
+24
.16e %25
+24
.16e %25
+24
.16e %25
+24
.16e %25
+24
.16e %25
+24
.16e %25
+24
.16e %25
+24
.16e
|
d7baf900a39e9a209a737b4370b65e8e67902da0 | Version bump to v3.6.7 | geomdl/__init__.py | geomdl/__init__.py | """ NURBS & B-Spline 2D/3D curve and surface evaluation library in pure Python
.. moduleauthor:: Onur Rauf Bingol <[email protected]>
"""
__author__ = "Onur Rauf Bingol"
__version__ = "3.6.6"
__license__ = "MIT"
| Python | 0 | @@ -186,17 +186,17 @@
= %223.6.
-6
+7
%22%0A__lice
|
f5e2e7cbb494fc111efcf4abd5c744091e9ee8aa | Fix function name error | module/submodules/graphs.py | module/submodules/graphs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import time
from shinken.log import logger
from .metamodule import MetaModule
class GraphsMetaModule(MetaModule):
_functions = ['get_graph_uris']
_custom_log = "You should configure the module 'graphite' in your broker and the module 'ui-graphite' in webui.cfg file to be able to display graphs."
def get_graph_uris(self, elt, graphstart=None, graphend=None, duration=None, source='detail'):
''' Aggregate the get_graph_uris of all the submodules.
The source parameter defines the source of the calling:
Are we displaying graphs for the element detail page (detail),
        or a widget in the dashboard (dashboard)?
If duration is not None, we consider it as a number of seconds to graph and
we call the module get_relative_graphs_uri
If get_relative_graphs_uri is not a module function we compute graphstart and
        graphend and we call the module get_graphs_uri
If graphstart and graphend are not None, we call the module get_graphs_uri
'''
uris = []
for mod in self.modules:
if not duration:
uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source))
else:
f = getattr(mod, 'get_relative_graphs_uri', None)
if f and callable(f):
uris.extend(f(elt, duration, source))
else:
graphend = time.time()
graphstart = graphend - duration
uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source))
logger.debug("[WebUI] Got graphs: %s", uris)
return uris
| Python | 0.002384 | @@ -1419,21 +1419,21 @@
ve_graph
-s
_uri
+s
', None)
|
6ba01511dd8a9e0fd02de9553e5646e0de7aae0a | Version bump to v3.6.8 | geomdl/__init__.py | geomdl/__init__.py | """ NURBS & B-Spline 2D/3D curve and surface evaluation library in pure Python
.. moduleauthor:: Onur Rauf Bingol <[email protected]>
"""
__author__ = "Onur Rauf Bingol"
__version__ = "3.6.7"
__license__ = "MIT"
| Python | 0 | @@ -190,9 +190,9 @@
3.6.
-7
+8
%22%0A__
|
e69542c01959e7cf874c6ca1ae5c94d0c9a0ba1f | Fix tarball URL's for htslib (#5993) | var/spack/repos/builtin/packages/htslib/package.py | var/spack/repos/builtin/packages/htslib/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Htslib(AutotoolsPackage):
"""C library for high-throughput sequencing data formats."""
homepage = "https://github.com/samtools/htslib"
url = "https://github.com/samtools/htslib/releases/download/1.3.1/htslib-1.3.1.tar.bz2"
version('1.6', 'd6fd14e208aca7e08cbe9072233d0af9')
version('1.4', '2a22ff382654c033c40e4ec3ea880050')
version('1.3.1', '16d78f90b72f29971b042e8da8be6843')
version('1.2', '64026d659c3b062cfb6ddc8a38e9779f',
url='https://github.com/samtools/htslib/archive/1.2.tar.gz')
depends_on('zlib')
depends_on('bzip2', when="@1.4:")
depends_on('xz', when="@1.4:")
depends_on('m4', when="@1.2")
depends_on('autoconf', when="@1.2")
depends_on('automake', when="@1.2")
depends_on('libtool', when="@1.2")
| Python | 0 | @@ -1402,105 +1402,8 @@
lib%22
-%0A url = %22https://github.com/samtools/htslib/releases/download/1.3.1/htslib-1.3.1.tar.bz2%22
%0A%0A
@@ -1624,75 +1624,8 @@
79f'
-,%0A url='https://github.com/samtools/htslib/archive/1.2.tar.gz'
)%0A%0A
@@ -1826,32 +1826,32 @@
', when=%[email protected]%22)%0A
-
depends_on('
@@ -1857,24 +1857,469 @@
'libtool', when=%[email protected]%22)%0A
+%0A # v1.2 uses the automagically assembled tarball from .../archive/...%0A # everything else uses the tarballs uploaded to the release%0A def url_for_version(self, version):%0A if version.string == '1.2':%0A return 'https://github.com/samtools/htslib/archive/1.2.tar.gz'%0A else:%0A url = %22https://github.com/samtools/htslib/releases/download/%7B0%7D/htslib-%7B0%7D.tar.bz2%22%0A return url.format(version.dotted)%0A
|
1a2c69e95eb02010f0a72aebe2554be58db63f42 | Generate name from German title if possible | ckanext/switzerland/dcat/harvesters.py | ckanext/switzerland/dcat/harvesters.py | # flake8: noqa
import json
import ckan.plugins as p
import ckan.model as model
from ckanext.harvest.model import HarvestObject
from ckanext.dcat.parsers import RDFParserException, RDFParser
from ckanext.dcat.interfaces import IDCATRDFHarvester
from ckanext.dcat.harvesters.rdf import DCATRDFHarvester
import logging
log = logging.getLogger(__name__)
class SwissDCATRDFHarvester(DCATRDFHarvester):
def info(self):
return {
'name': 'dcat_ch_rdf',
'title': 'DCAT-AP Switzerland RDF Harvester',
'description': 'Harvester for DCAT-AP Switzerland datasets from an RDF graph' # noqa
}
def _get_guid(self, dataset_dict, source_url=None): # noqa
'''
Try to get a unique identifier for a harvested dataset
It will be the first found of:
* URI (rdf:about)
* dcat:identifier
* Source URL + Dataset name
* Dataset name
        The last two are obviously not optimal, as they depend on the title, which
might change.
Returns None if no guid could be decided.
'''
guid = None
for extra in dataset_dict.get('extras', []):
if extra['key'] == 'uri' and extra['value']:
return extra['value']
if dataset_dict.get('uri'):
return dataset_dict['uri']
for extra in dataset_dict.get('extras', []):
if extra['key'] == 'identifier' and extra['value']:
return extra['value']
if dataset_dict.get('identifier'):
return dataset_dict['identifier']
for extra in dataset_dict.get('extras', []):
if extra['key'] == 'dcat_identifier' and extra['value']:
return extra['value']
if dataset_dict.get('name'):
guid = dataset_dict['name']
if source_url:
guid = source_url.rstrip('/') + '/' + guid
return guid
def gather_stage(self, harvest_job):
log.debug('In DCATRDFHarvester gather_stage')
# Get file contents
url = harvest_job.source.url
for harvester in p.PluginImplementations(IDCATRDFHarvester):
url, before_download_errors = harvester.before_download(url, harvest_job)
for error_msg in before_download_errors:
self._save_gather_error(error_msg, harvest_job)
if not url:
return False
rdf_format = None
if harvest_job.source.config:
rdf_format = json.loads(harvest_job.source.config).get("rdf_format")
content, rdf_format = self._get_content_and_type(url, harvest_job, 1, content_type=rdf_format)
# TODO: store content?
for harvester in p.PluginImplementations(IDCATRDFHarvester):
content, after_download_errors = harvester.after_download(content, harvest_job)
for error_msg in after_download_errors:
self._save_gather_error(error_msg, harvest_job)
if not content:
return False
# TODO: profiles conf
parser = RDFParser()
try:
parser.parse(content, _format=rdf_format)
except RDFParserException, e:
self._save_gather_error('Error parsing the RDF file: {0}'.format(e), harvest_job)
return False
guids_in_source = []
object_ids = []
for dataset in parser.datasets():
if not dataset.get('name'):
dataset['name'] = self._gen_new_name(dataset['title']['de'])
# Unless already set by the parser, get the owner organization (if any)
# from the harvest source dataset
if not dataset.get('owner_org'):
source_dataset = model.Package.get(harvest_job.source.id)
if source_dataset.owner_org:
dataset['owner_org'] = source_dataset.owner_org
# Try to get a unique identifier for the harvested dataset
guid = self._get_guid(dataset)
if not guid:
log.error('Could not get a unique identifier for dataset: {0}'.format(dataset))
continue
dataset['extras'].append({'key': 'guid', 'value': guid})
guids_in_source.append(guid)
obj = HarvestObject(guid=guid, job=harvest_job,
content=json.dumps(dataset))
obj.save()
object_ids.append(obj.id)
# Check if some datasets need to be deleted
object_ids_to_delete = self._mark_datasets_for_deletion(guids_in_source, harvest_job)
object_ids.extend(object_ids_to_delete)
return object_ids
| Python | 1 | @@ -1,252 +1,4 @@
-# flake8: noqa%0A%0Aimport json%0A%0Aimport ckan.plugins as p%0Aimport ckan.model as model%0A%0Afrom ckanext.harvest.model import HarvestObject%0A%0Afrom ckanext.dcat.parsers import RDFParserException, RDFParser%0Afrom ckanext.dcat.interfaces import IDCATRDFHarvester%0A
from
@@ -1691,1558 +1691,102 @@
def
-gather_stage(self, harvest_job):%0A%0A log.debug('In DCATRDFHarvester gather_stage')%0A%0A # Get file contents%0A url = harvest_job.source.url%0A%0A for harvester in p.PluginImplementations(IDCATRDFHarvester):%0A url, before_download_errors = harvester.before_download(url, harvest_job)%0A%0A for error_msg in before_download_errors:%0A self._save_gather_error(error_msg, harvest_job)%0A%0A if not url:%0A return False%0A%0A rdf_format = None%0A if harvest_job.source.config:%0A rdf_format = json.loads(harvest_job.source.config).get(%22rdf_format%22)%0A content, rdf_format = self._get_content_and_type(url, harvest_job, 1, content_type=rdf_format)%0A%0A # TODO: store content?%0A for harvester in p.PluginImplementations(IDCATRDFHarvester):%0A content, after_download_errors = harvester.after_download(content, harvest_job)%0A%0A for error_msg in after_download_errors:%0A self._save_gather_error(error_msg, harvest_job)%0A%0A if not content:%0A return False%0A%0A # TODO: profiles conf%0A parser = RDFParser()%0A%0A try:%0A parser.parse(content, _format=rdf_format)%0A except RDFParserException, e:%0A self._save_gather_error('Error parsing the RDF file: %7B0%7D'.format(e), harvest_job)%0A return False%0A%0A guids_in_source = %5B%5D%0A object_ids = %5B%5D%0A for dataset in parser.datasets():%0A if not dataset.get('name'):%0A dataset%5B'name'%5D = self
+_gen_new_name(self, title):%0A try:%0A return super(SwissDCATRDFHarvester, self)
._ge
@@ -1800,325 +1800,53 @@
ame(
-dataset%5B'
title
-'%5D
%5B'de'%5D)
-%0A%0A # Unless already set by the parser, get the owner organization (if any)%0A # from the harvest source dataset%0A if not dataset.get('owner_org'):%0A source_dataset = model.Package.get(harvest_job.source.id)%0A if source_dataset.owner_
+ # noqa%0A except TypeErr
or
-g
:%0A
@@ -1859,841 +1859,75 @@
-
- dataset%5B'owner_org'%5D = source_dataset.owner_org%0A%0A # Try to get a unique identifier for the harvested dataset%0A guid = self._get_guid(dataset)%0A%0A if not guid:%0A log.error('Could not get a unique identifier for dataset: %7B0%7D'.format(dataset))%0A continue%0A%0A dataset%5B'extras'%5D.append(%7B'key': 'guid', 'value': guid%7D)%0A guids_in_source.append(guid)%0A%0A obj = HarvestObject(guid=guid, job=harvest_job,%0A content=json.dumps(dataset))%0A%0A obj.save()%0A object_ids.append(obj.id)%0A%0A # Check if some datasets need to be deleted%0A object_ids_to_delete =
+return super(SwissDCATRDFHarvester,
self
+)
._
-mark_datasets_for_deletion(guids_in_source, harvest_job)%0A%0A object_ids.extend(object_ids_to_delete)%0A%0A return object_ids%0A
+gen_new_name(title) # noqa
%0A
|
7f29770766a30bf821689960189e95526eee6bdc | print python version if using file directly, not as import | getDataRemotely.py | getDataRemotely.py | import sys
from dictAsFile_wrapper import *
def run():
hashtableName = 'hashtable.pkl'
data = {}
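    # In both branches the pickled hashtable is downloaded into the working
    # directory and then loaded back into a dict with readFileToDict().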
# use different import based on python version number:
if (sys.version_info > (3, 0)):
# python 3:
print('python 3')
import urllib.request
url = 'https://raw.githubusercontent.com/hchiam/cognateLanguage/master/hashtable.pkl'
urllib.request.urlretrieve(url) # download file
data = readFileToDict(hashtableName)
# with urllib.request.urlopen('https://raw.githubusercontent.com/hchiam/cognateLanguage/master/output_shortlist.txt') as response:
# line = response.readline().decode('utf-8').replace('\n','')
# while line != '':
# data.append(line)
# line = response.readline().decode('utf-8').replace('\n','')
else:
# python 2:
print('python 2')
import urllib2
url = 'https://raw.githubusercontent.com/hchiam/cognateLanguage/master/hashtable.pkl'
response = urllib2.urlopen(url) # download file
data = readFileToDict(hashtableName)
# response = urllib2.urlopen('https://raw.githubusercontent.com/hchiam/cognateLanguage/master/output_shortlist.txt')
# data = response.read().split('\n')
return data
# this if statement is so that the following code only runs if this .py file is not being imported
if __name__ == '__main__':
data = run()
# debug print out:
print ('debug output: data[\"hi\"] = ' + data["hi"]) | Python | 0.000001 | @@ -220,16 +220,55 @@
thon 3:%0A
+ if __name__ == '__main__':%0A
@@ -900,16 +900,55 @@
thon 2:%0A
+ if __name__ == '__main__':%0A
|
9c218079f00e9b3c7285cd94dcc7836531f722a5 | Install RMPISNOW wrapper in prefix.bin for r-snow (#16479) | var/spack/repos/builtin/packages/r-snow/package.py | var/spack/repos/builtin/packages/r-snow/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSnow(RPackage):
"""Support for simple parallel computing in R."""
homepage = "https://cloud.r-project.org/package=snow"
url = "https://cloud.r-project.org/src/contrib/snow_0.4-2.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/snow"
version('0.4-3', sha256='8512537daf334ea2b8074dbb80cf5e959a403a78d68bc1e97664e8a4f64576d8')
version('0.4-2', sha256='ee070187aea3607c9ca6235399b3db3e181348692405d038e962e06aefccabd7')
depends_on('[email protected]:', type=('build', 'run'))
| Python | 0 | @@ -739,8 +739,147 @@
'run'))%0A
+%0A @run_after('install')%0A def install_wrapper(self):%0A mkdir(self.prefix.bin)%0A install('inst/RMPISNOW', self.prefix.bin)%0A
|
5c7d690902b19b6c333a9fbced80ffa0f1d1b81e | update cfg_validator tests for breakage | congress/tests/cfg_validator/test_parsing.py | congress/tests/cfg_validator/test_parsing.py | #
# Copyright (c) 2017 Orange.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Tests for the unmarshaling of options by the driver"""
from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
from congress.cfg_validator import parsing
from congress.tests import base
LOG = logging.getLogger(__name__)
OPT_TEST = {
u'positional': False, u'kind': u'BoolOpt',
u'deprecated_reason': None,
u'help': u'Enables or disables inter-process locks.',
u'default': False, u'type': {u'type': u'Boolean'},
u'required': False, u'sample_default': None,
u'deprecated_opts': [{u'group': u'DEFAULT', u'name': None}],
u'deprecated_for_removal': False,
u'dest': u'disable_process_locking',
u'secret': False, u'short': None, u'mutable': False,
u'deprecated_since': None, u'metavar': None,
u'advanced': False, u'name': u'disable_process_locking'}
DICT_NS_TEST = {
u'DEFAULT': {u'object': None, u'namespaces': []},
u'oslo_concurrency': {
u'object': None,
u'namespaces': [[u'oslo.concurrency', [OPT_TEST]]]}}
class TestParsing(base.TestCase):
"""Tests for the unmarshaling of options by the driver"""
def test_add_namespace(self):
"""Test for adding a namespace"""
conf = cfg.ConfigOpts()
parsing.add_namespace(conf, DICT_NS_TEST, 'abcde-12345')
keys = conf.keys()
self.assertEqual(1, len(keys))
self.assertIn(u'oslo_concurrency', keys)
self.assertIsNotNone(
conf.get(u'oslo_concurrency').get(u'disable_process_locking'))
def test_construct_conf_manager(self):
"""Test for building a conf manager"""
conf = parsing.construct_conf_manager([DICT_NS_TEST])
self.assertIsInstance(conf, cfg.ConfigOpts)
keys = conf.keys()
self.assertEqual(1, len(keys))
self.assertIn(u'oslo_concurrency', keys)
def test_make_group(self):
"""Test for parsing a group"""
grp = parsing.make_group('group', 'group_title', 'group help')
self.assertIsInstance(grp, cfg.OptGroup)
self.assertEqual("group", grp.name)
self.assertEqual("group_title", grp.title)
def test_make_opt(self):
"""Test for parsing an option"""
descr = {
u'positional': False,
u'kind': u'Opt',
u'deprecated_reason': None,
u'help': u'Help me',
u'default': None,
u'type': {u'type': u'String'},
u'required': False, u'sample_default': None,
u'deprecated_opts': [], u'deprecated_for_removal': False,
u'dest': u'name',
u'secret': False,
u'short': None,
u'mutable': False,
u'deprecated_since': None,
u'metavar': None,
u'advanced': False,
u'name': u'name'}
opt = parsing.make_opt(descr, 'abcd-1234', 'efgh-5678')
self.assertIsInstance(opt, parsing.IdentifiedOpt)
self.assertEqual("name", opt.name)
self.assertEqual('abcd-1234', opt.id_)
self.assertEqual('efgh-5678', opt.ns_id)
def test_make_type(self):
"""Test for parsing a type"""
typ1 = parsing.make_type({u'type': u'String'})
self.assertIsInstance(typ1, types.String)
typ2 = parsing.make_type({u'type': u'Integer'})
self.assertIsInstance(typ2, types.Integer)
typ3 = parsing.make_type(
{u'item_type': {u'type': u'Boolean'}, u'type': u'List'})
self.assertIsInstance(typ3, types.List)
self.assertIsInstance(typ3.item_type, types.Boolean)
| Python | 0 | @@ -1819,24 +1819,68 @@
onfigOpts()%0A
+ initial_keys_len = len(conf.keys())%0A
pars
@@ -1976,32 +1976,51 @@
elf.assertEqual(
+initial_keys_len +
1, len(keys))%0A
@@ -2258,24 +2258,80 @@
manager%22%22%22%0A
+ initial_keys_len = len(cfg.ConfigOpts().keys())%0A
conf
@@ -2484,16 +2484,35 @@
rtEqual(
+initial_keys_len +
1, len(k
|
f42744558b989f8122f67d24bf65c8514eb516cb | Use better names for generated IR files. | runac/__init__.py | runac/__init__.py | from . import tokenizer, ast, blocks, ti, specialize, codegen
from util import Error
import sys, os, subprocess, tempfile
BASE = os.path.dirname(__path__[0])
CORE_DIR = os.path.join(BASE, 'core')
TRIPLES = {
'darwin': 'x86_64-apple-darwin11.0.0',
'linux2': 'x86_64-pc-linux-gnu',
}
def tokenize(f):
return tokenizer.tokenize(f)
def parse(tokens):
return ast.parse(tokens)
def module(ast):
mod = blocks.Module(ast)
for fn in os.listdir(CORE_DIR):
if not fn.endswith('.rns'):
continue
with open(os.path.join(CORE_DIR, fn)) as f:
mod.merge(blocks.Module(parse(tokenize(f))))
return mod
def type(mod):
ti.typer(mod)
def spec(mod):
specialize.specialize(mod)
def generate(mod):
triple = 'target triple = "%s"\n\n' % TRIPLES[sys.platform]
with open('core/rt.ll') as f:
rt = f.read()
return triple + rt + '\n' + codegen.source(mod)
def compile(ir, outfn):
fd, name = tempfile.mkstemp('.ll', dir='.')
f = os.fdopen(fd, 'wb')
f.write(ir)
f.close()
try:
subprocess.check_call(('clang', '-o', outfn, name))
except OSError as e:
if e.errno == 2:
print 'error: clang not found'
except subprocess.CalledProcessError:
pass
finally:
os.unlink(name)
def full(fn, outfn):
with open(fn) as f:
mod = module(parse(tokenize(f)))
type(mod)
spec(mod)
compile(generate(mod), outfn)
| Python | 0 | @@ -892,99 +892,70 @@
%0A%09%0A%09
-fd,
name =
-tempfile.mkstemp('.ll', dir='.')%0A%09f = os.fdopen(fd, 'wb')%0A%09f.write(ir)%0A%09f.close(
+outfn + '.ll'%0A%09with open(name, 'wb') as f:%0A%09%09f.write(ir
)%0A%09%0A
|
a328a1974b985eda47191748e28a69d1e521f070 | Implement several small crawlers for FREEBUF's AJAX pages - json library parsing - the proper (scientific) way | freebufspider2.py | freebufspider2.py | import requests
from bs4 import BeautifulSoup
import json
for i in range(1, 20):
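    # Each request fetches one page of the AJAX article listing; the JSON
    # response carries the rendered HTML fragment in its 'cont' field.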
url = 'http://www.freebuf.com/www.freebuf.com?action=ajax_wenku&year=all&score=all&type=all&tech=0&keyword=&page=' + str(
i)
r = requests.get(url)
data = json.loads(r.text)
soup = BeautifulSoup(data['cont'])
for i in soup.select('h3 a'):
print(i.getText(), i.get('href'))
| Python | 0 | @@ -267,16 +267,32 @@
(r.text)
+#%E4%BD%BF%E7%94%A8json%E5%BA%93%E8%A7%A3%E6%9E%90%EF%BC%8C%E7%A7%91%E5%AD%A6%E7%9A%84%E5%81%9A%E6%B3%95
%0A sou
|
cd9e8c1595e0e987e2ec0067c9532a9778e64ea3 | Update test_plugin.py | logstash_plugin/tests/test_plugin.py | logstash_plugin/tests/test_plugin.py | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import subprocess
from logstash_test_utils import (
LogstashTestUtils,
DEFAULT_UBUNTU_UNINSTALL,
DEFAULT_LOGSTASH_STOP,
DEFAULT_LOGSTASH_CONFIG_PATH
)
# from cloudify import ctx
class TestLogstashPlugin(LogstashTestUtils):
    def setUp(self):
super(LogstashTestUtils, self).setUp()
self._set_up()
def test_install_static_clean(self):
inputs = self.get_static_config_inputs()
self._set_up(inputs)
self.addCleanup(subprocess.call, DEFAULT_UBUNTU_UNINSTALL)
self.env.execute('install', task_retries=10)
self.addCleanup(subprocess.call, DEFAULT_LOGSTASH_STOP)
logstash_started = subprocess.call(
"sudo service logstash status", shell=True)
self.assertIn('started', logstash_started)
self.addCleanup(os.remove, DEFAULT_LOGSTASH_CONFIG_PATH)
with open(DEFAULT_LOGSTASH_CONFIG_PATH, 'r') as default:
self.assertEqual(default.read(), self.get_config())
def test_uninstall_static_clean(self):
self.addCleanup(subprocess.call, DEFAULT_UBUNTU_UNINSTALL)
self.addCleanup(subprocess.call, DEFAULT_LOGSTASH_STOP)
self.addCleanup(os.remove, DEFAULT_LOGSTASH_CONFIG_PATH)
inputs = self.get_static_config_inputs()
self._set_up(inputs)
self.env.execute('install', task_retries=10)
self.env.execute('uninstall', task_retries=10)
logstash_stopped = subprocess.call(
"sudo service logstash status", shell=True)
self.assertNotIn('started', logstash_stopped)
| Python | 0.000003 | @@ -1173,32 +1173,37 @@
L)%0A self.
+local
env.execute('ins
@@ -1958,32 +1958,37 @@
s)%0A self.
+local
env.execute('ins
@@ -2024,16 +2024,21 @@
self.
+local
env.exec
|
74a182a13bae5dde3e2b4fe604a839e5ec05e771 | load palette hoohah | cooperhewitt/swatchbook/palettes/__init__.py | cooperhewitt/swatchbook/palettes/__init__.py | # I blame, Guido
| Python | 0.000002 | @@ -1,17 +1,430 @@
-# I blame, Guido
+def palettes():%0A%0A return %5B%0A 'css3',%0A 'css4'%0A %5D%0A%0Adef load_palette(reference):%0A%0A if not reference in palettes():%0A raise Exception, %22Invalid palette%22%0A%0A # Please figure out the hoo-hah to make dynamic%0A # loading work (20140623/straup)%0A%0A if reference == 'css3':%0A import css3%0A return css3.colours()%0A %0Aif __name__ == '__main__':%0A%0A p = load_palette('css5')%0A print p%0A %0A
%0A
|
f6686e69e71522514f3f9e5f583e77176fdc9580 | Fix step minimum duration field type | microdrop/core_plugins/device_info_plugin/__init__.py | microdrop/core_plugins/device_info_plugin/__init__.py | """
Copyright 2015 Christian Fobel
This file is part of droplet_planning_plugin.
droplet_planning_plugin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
dmf_control_board is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with droplet_planning_plugin. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, traceback
from datetime import datetime
from collections import OrderedDict
from path_helpers import path
from flatland import Integer, Boolean, Form, String
from flatland.validation import ValueAtLeast, ValueAtMost
from microdrop.logger import logger
from microdrop.plugin_helpers import (AppDataController, StepOptionsController,
get_plugin_info)
from microdrop.plugin_manager import (PluginGlobals, Plugin, IPlugin,
implements, emit_signal)
from microdrop.app_context import get_app
import gobject
import gtk
PluginGlobals.push_env('microdrop.managed')
class DropletPlanningPlugin(Plugin, AppDataController, StepOptionsController):
"""
This class is automatically registered with the PluginManager.
"""
implements(IPlugin)
version = get_plugin_info(path(__file__).parent).version
plugin_name = get_plugin_info(path(__file__).parent).plugin_name
'''
AppFields
---------
A flatland Form specifying application options for the current plugin.
Note that nested Form objects are not supported.
Since we subclassed AppDataController, an API is available to access and
modify these attributes. This API also provides some nice features
automatically:
-all fields listed here will be included in the app options dialog
(unless properties=dict(show_in_gui=False) is used)
-the values of these fields will be stored persistently in the microdrop
config file, in a section named after this plugin's name attribute
'''
AppFields = Form.of(
Integer.named('transition_duration_ms').using(optional=True,
default=750),
)
'''
StepFields
---------
A flatland Form specifying the per step options for the current plugin.
Note that nested Form objects are not supported.
Since we subclassed StepOptionsController, an API is available to access and
modify these attributes. This API also provides some nice features
automatically:
-all fields listed here will be included in the protocol grid view
(unless properties=dict(show_in_gui=False) is used)
-the values of these fields will be stored persistently for each step
'''
StepFields = Form.of(
Boolean.named('min_duration').using(default=0, optional=True),
)
def __init__(self):
self.name = self.plugin_name
self.timeout_id = None
self.start_time = None
self.transition_counter = 0
def on_step_run(self):
"""
Handler called whenever a step is executed. Note that this signal
is only emitted in realtime mode or if a protocol is running.
Plugins that handle this signal must emit the on_step_complete
signal once they have completed the step. The protocol controller
will wait until all plugins have completed the current step before
proceeding.
return_value can be one of:
None
'Repeat' - repeat the step
or 'Fail' - unrecoverable error (stop the protocol)
"""
app = get_app()
logger.info('[DropletPlanningPlugin] on_step_run(): step #%d',
app.protocol.current_step_number)
app_values = self.get_app_values()
device_step_options = app.dmf_device_controller.get_step_options()
try:
if self.timeout_id is not None:
# Timer was already set, so cancel previous timer.
gobject.source_remove(self.timeout_id)
#if self.transition_counter < self.step_drop_route_lengths.max():
drop_route_groups = (device_step_options.drop_routes
.groupby('route_i'))
# Look up the drop routes for the current step.
self.step_drop_routes = OrderedDict([(route_i, df_route_i)
for route_i, df_route_i in
drop_route_groups])
# Get the number of transitions in each drop route.
self.step_drop_route_lengths = drop_route_groups['route_i'].count()
self.start_time = datetime.now()
self.transition_counter = 0
self.timeout_id = gobject.timeout_add(app_values
['transition_duration_ms'],
self.on_timer_tick)
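            # Each timer tick advances every active droplet route by one
            # electrode until the longest route has been fully traversed.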
except:
print "Exception in user code:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
# An error occurred while initializing Analyst remote control.
emit_signal('on_step_complete', [self.name, 'Fail'])
def on_timer_tick(self):
app = get_app()
try:
if self.transition_counter < self.step_drop_route_lengths.max():
active_step_lengths = (self.step_drop_route_lengths
.loc[self.step_drop_route_lengths >
self.transition_counter])
device_view = app.dmf_device_controller.view
for route_i, length_i in active_step_lengths.iteritems():
# Remove custom coloring for previously active electrode.
if self.transition_counter > 0:
transition_i = (self.step_drop_routes[route_i]
.iloc[self.transition_counter - 1])
device_view.set_electrode_color_by_index(transition_i
.electrode_i)
# Add custom coloring to active electrode.
transition_i = (self.step_drop_routes[route_i]
.iloc[self.transition_counter])
device_view.set_electrode_color_by_index(transition_i
.electrode_i,
(255, 255, 255))
gtk.idle_add(app.dmf_device_controller.view.update_draw_queue)
self.transition_counter += 1
else:
emit_signal('on_step_complete', [self.name, None])
self.timeout_id = None
self.start_time = None
self.transition_counter = 0
return False
except:
print "Exception in user code:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
emit_signal('on_step_complete', [self.name, 'Fail'])
self.timeout_id = None
self.remote = None
return False
return True
def on_step_options_swapped(self, plugin, old_step_number, step_number):
"""
Handler called when the step options are changed for a particular
plugin. This will, for example, allow for GUI elements to be
updated based on step specified.
Parameters:
plugin : plugin instance for which the step options changed
step_number : step number that the options changed for
"""
pass
def on_step_swapped(self, old_step_number, step_number):
"""
Handler called when the current step is swapped.
"""
PluginGlobals.pop_env()
| Python | 0.000001 | @@ -3092,23 +3092,23 @@
-Boolean
+Integer
.named('
|
ff99ed308edd661db7b692cb92eb3c6465843204 | Add JSON parser | pande_gas/utils/molecule_net/__init__.py | pande_gas/utils/molecule_net/__init__.py | """
Utilities for MoleculeNet.
"""
import re
import xml.etree.cElementTree as et
class PcbaXmlParser(object):
"""
Parser for PubChem BioAssay XML.
Parameters
----------
filename : str
Filename.
"""
def __init__(self, filename):
self.tree = et.parse(filename)
self.root = self.tree.getroot()
# default prefix for all tags
self.prefix = '{http://www.ncbi.nlm.nih.gov}'
def find(self, tag, root=None):
"""
Return a list of the elements with a given tag. Note that this only
searches the direct children of root.
Parameters
----------
tag : str
XML tag.
root : bool, optional (default False)
Root of XML tree.
"""
if root is None:
root = self.root
return root.findall(self.prefix + tag)
def join_children(self, elem):
"""
Join the text for the children of an element.
Parameters
----------
elem : Element
Element.
"""
text = ''
for child in elem.getchildren():
if child.text is not None:
text += child.text + child.tail
return text
def get_name(self):
"""
Get assay name.
"""
elem = self.find('PC-AssayDescription_name')
assert len(elem) == 1
return elem[0].text
def get_description(self):
"""
Get assay description.
"""
elem = self.find('PC-AssayDescription_description')
assert len(elem) == 1
return self.join_children(elem[0])
def get_protocol(self):
"""
Get assay protocol.
"""
elem = self.find('PC-AssayDescription_protocol')
assert len(elem) == 1
return self.join_children(elem[0])
def get_target(self):
"""
Get assay target.
Returns
-------
target : dict
A dictionary containing keys for target information types, such
as 'name', 'mol-id', and 'molecule-type'.
"""
elem = self.find('PC-AssayDescription_target')
assert len(elem) == 1
info = self.find('PC-AssayTargetInfo', elem[0])
assert len(info) == 1
target = {}
for e in info[0].getchildren():
if not e.text.strip():
continue # skip blank entries
m = re.search('PC-AssayTargetInfo_(.*)', e.tag)
key = m.groups()[0]
target[key] = e.text
return target
| Python | 0.000084 | @@ -28,16 +28,28 @@
et.%0A%22%22%22%0A
+import json%0A
import r
@@ -88,16 +88,1236 @@
as et%0A%0A%0A
+class PcbaJsonParser(object):%0A %22%22%22%0A Parser for PubChemBioAssay JSON.%0A%0A Parameters%0A ----------%0A filename : str%0A Filename.%0A %22%22%22%0A def __init__(self, filename):%0A self.tree = json.load(filename)%0A%0A # should just be one record per file%0A assert len(self.tree%5B'PC_AssayContainer'%5D) == 1%0A%0A # move in to the assay description%0A self.root = self.tree%5B'PC_AssayContainer'%5D%5B0%5D%5B'assay'%5D%5B'descr'%5D%0A%0A def get_name(self):%0A %22%22%22%0A Get assay name.%0A %22%22%22%0A return self.root%5B'name'%5D%0A%0A def get_description(self):%0A %22%22%22%0A Get assay description.%0A %22%22%22%0A return '%5Cn'.join(self.root%5B'description'%5D)%0A%0A def get_protocol(self):%0A %22%22%22%0A Get assay protocol.%0A %22%22%22%0A return '%5Cn'.join(self.root%5B'protocol'%5D)%0A%0A def get_target(self):%0A %22%22%22%0A Get assay target.%0A%0A TODO: Decide which fields are important. We may be able to match%0A targets by mol-id.%0A%0A Returns%0A -------%0A target : dict%0A A dictionary containing keys for target information types, such%0A as 'name', 'mol-id', and 'molecule-type'.%0A %22%22%22%0A return self.root%5B'target'%5D%0A%0A%0A
class Pc
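
A minimal usage sketch for the PcbaJsonParser class added in the diff above. The assay file path is an assumed example, and note that the constructor's `filename` argument must already be an open file object, since it is handed straight to json.load().

# Hypothetical usage of the PcbaJsonParser from the diff above; the path is assumed.
with open('data/aid1030.json') as handle:
    parser = PcbaJsonParser(handle)  # needs a file object despite the parameter name
    print(parser.get_name())         # assay name
    print(parser.get_protocol())     # newline-joined protocol text
    print(parser.get_target())       # raw 'target' block from the descriptor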
|
220983a4cf75f4e27f5491812de9ff04f4104510 | fix butter_bandpass | code/python/seizures/preprocessing/preprocessing.py | code/python/seizures/preprocessing/preprocessing.py | import scipy.signal as signal
def preprocess_multichannel_data(matrix,fs):
"""
:param matrix: multichannel EEG data
:param fs: sampling frequency
:return: data without mains, electrical artefacts etc
authors: Lea and Vincent
"""
n_channel,m= matrix.shape
for i in range(n_channel):
preprocess_single_channel(matrix[i,:],fs)
def preprocess_single_channel(x,fs):
x = remove_elec_noise(x,fs)
x = anti_alias_filter(x)
x = remove_dc(x)
return x
def remove_dc(x):
"""
Remove mean of signal: use 0.5Hz cut-off hp filter
:return:
"""
x = signal.medfilt(x)
return x
def remove_elec_noise(x,fs):
"""
Bandpass remove:59-61Hz (US); if data from EU/UK 49-51Hz
:return:
"""
lowcut = 59
highcut = 61
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = signal.butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = signal.butter_bandpass(lowcut, highcut, fs, order=order)
y = signal.lfilter(b, a, data)
return y
return butter_bandpass_filter(x,fs)
def anti_alias_filter(x,fs):
"""
Anti_aliasing: use Nyquist frequ cutoff low-pass filter
:return:
"""
numtaps = 1
cutoff = 0.5 * fs
x = signal.firwin(numtaps, cutoff)
return x
| Python | 0.000004 | @@ -766,19 +766,45 @@
-lowcut = 59
+bandstop = 60%0A lowcut = bandstop-1
%0A
@@ -814,17 +814,25 @@
ghcut =
-6
+bandstop+
1%0A%0A%0A
@@ -1077,20 +1077,17 @@
_filter(
-data
+x
, lowcut
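
The fix above reshapes the 59-61 Hz helper around a single mains frequency. A self-contained sketch of the same idea as a Butterworth band-stop filter with scipy.signal is shown below; the 400 Hz sampling rate, order 5, and 1 Hz notch half-width are assumed values for illustration, not taken from the record.

import numpy as np
from scipy import signal

def butter_bandstop(lowcut, highcut, fs, order=5):
    # Normalise the band edges by the Nyquist frequency and design the filter.
    nyq = 0.5 * fs
    return signal.butter(order, [lowcut / nyq, highcut / nyq], btype='bandstop')

def remove_mains(x, fs, mains=60.0, width=1.0):
    # Attenuate a narrow band around the mains frequency (59-61 Hz by default).
    b, a = butter_bandstop(mains - width, mains + width, fs)
    return signal.lfilter(b, a, x)

fs = 400.0                                                  # assumed sampling rate
t = np.arange(0, 2.0, 1.0 / fs)
eeg = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)
clean = remove_mains(eeg, fs)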
|
4a4e56a0909d8e89d82462c846f365b0849b3cb4 | add missing import | generator/aidl.py | generator/aidl.py | #!/usr/bin/python
from eclipse2buck.generator.base_target import BaseTarget
from eclipse2buck.decorator import target
from eclipse2buck.util import util
from eclipse2buck import config
class AIDL(BaseTarget):
"""
generated all aidl targets
"""
aidl_path_list = []
def __init__(self, root, name):
BaseTarget.__init__(self, root, name, config.aidl_suffix)
self.aidl_path_list = self._find_all_aidls(os.path.join(self.lib_path, 'src'))
for aidl_path in self.aidl_path_list:
name = self.target_name(util.path_get_basename(aidl_path))
self.deps.append(":%s" % name)
def dump_src(self):
for aidl in self.aidl_path_list:
#remove .aild
aidl = aidl[:-5]
print "genfile( '%s.java' )," % aidl
def dump(self):
for aidl_path in self.aidl_path_list:
name = self.target_name(util.path_get_basename(aidl_path))
self._gen_aidl_target(name, aidl_path)
def is_existed_aidl(self):
return len(self.aidl_path_list) > 0
def _find_all_aidls(self, relative_path):
path_list = util.find_all_files_with_suffix(relative_path, "*.aidl")
exclude_aidls = ["src/com/tencent/mm/cache/MCacheItem.aidl",
"src/com/tencent/tmassistantsdk/downloadclient/TMAssistantDownloadTaskInfo.aidl"]
#some aidl file needn't be generated
for exclude_aidl in exclude_aidls:
if exclude_aidl in path_list:
path_list.remove(exclude_aidl)
return path_list
@target("gen_aidl")
def _gen_aidl_target(self, aidl_name, path):
"""
print the aidl target
Returns:
str: the target name which lib target should depend on
"""
print "name = '%s'," % aidl_name
print "aidl = '%s'," % path
print "import_path = '%s/src/'," % self.proj_name
| Python | 0.000042 | @@ -178,16 +178,26 @@
t config
+%0Aimport os
%0A%0Aclass
|
6cf322bbce2bfd4088cc4c5af96cd72cad86ea95 | Add aspect as a parameter of Displayer init. | manifold/infrastructure/displayer.py | manifold/infrastructure/displayer.py | import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import confusion_matrix
class Displayer(object):
def __init__(self, **kwargs):
self.items = []
self.parameters = ', '.join(['%s: %s' % (k, str(v)) for k, v in kwargs.items()])
self.aspect = (20, -40)
def load(self, data, color=None, title=None):
# Always copy the data, and, of course, only the first three dimensions.
self.items.append((data[:, :3], color, title))
return self
def render(self):
# Assert that there is at least one graph to show.
assert self.items, 'nothing graphs to render.'
fig = plt.figure(figsize=(16, 9))
plt.suptitle(self.parameters)
count = len(self.items)
items_in_row = math.ceil(math.sqrt(count))
rows_count = math.ceil(count / items_in_row)
for i, item in enumerate(self.items):
data, color, title = item
samples, dimension = data.shape
# Grab data set components. It necessarily has 3 dimensions, as it was cut during load().
components = [data[:, i] for i in range(dimension)]
if dimension == 1:
components.append(np.zeros((samples, 1)))
kwargs = {}
if dimension > 2:
kwargs['projection'] = '3d'
ax = fig.add_subplot(
rows_count * 100 +
items_in_row * 10 +
1 + i, **kwargs)
kwargs = {}
if color is not None:
kwargs['c'] = color
ax.scatter(*components, s=50.0, cmap=plt.cm.rainbow, **kwargs)
if title:
plt.title(title)
plt.axis('tight')
if dimension > 2:
ax.view_init(*self.aspect)
plt.show()
return self
@classmethod
def confusion_matrix_for(cls, target_test, target_predicted, title='Confusion matrix'):
cm = confusion_matrix(target_test, target_predicted)
np.set_printoptions(precision=2)
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title(title)
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
| Python | 0 | @@ -220,18 +220,48 @@
elf.
-items = %5B%5D
+aspect = kwargs.pop('aspect', (20, -40))
%0A
@@ -363,26 +363,18 @@
elf.
-aspect = (20, -40)
+items = %5B%5D
%0A%0A
@@ -1336,54 +1336,69 @@
-kwargs = %7B%7D%0A if dimension %3E 2:%0A
+if color is None:%0A color = np.zeros(samples)%0A%0A
@@ -1411,17 +1411,20 @@
kwargs
-%5B
+ = %7B
'project
@@ -1431,16 +1431,40 @@
ion'
-%5D =
+:
'3d'
+%7D if dimension %3E 2 else %7B%7D
%0A%0A
@@ -1616,140 +1616,113 @@
-kwargs = %7B%7D%0A if color is not None:%0A kwargs%5B'c'%5D = color%0A%0A ax.scatter(*components, s=50.0,
+ax.scatter(*components, **%7B%0A 'c': color,%0A 's': 50,%0A '
cmap
-=
+':
plt.
@@ -1731,27 +1731,32 @@
.rainbow
-, **kwargs)
+%0A %7D)%0A
%0A
|
da54f60def189953b9ebbd754200103668e00042 | Handle full result sets. | marshmallow_pagination/paginators.py | marshmallow_pagination/paginators.py | # -*- coding: utf-8 -*-
import abc
import math
import six
import sqlalchemy as sa
from marshmallow_sqlalchemy.convert import ModelConverter
from marshmallow_pagination import pages
converter = ModelConverter()
def convert_value(row, attr):
field = converter._get_field_class_for_property(attr.property)
value = getattr(row, attr.key)
return field()._serialize(value, None, None)
class BasePaginator(six.with_metaclass(abc.ABCMeta, object)):
def __init__(self, cursor, per_page, count=None):
self.cursor = cursor
self.per_page = per_page
self.count = count or self._count()
def _count(self):
return self.cursor.count()
@abc.abstractproperty
def page_type(self):
pass
@property
def pages(self):
if self.per_page:
return int(math.ceil(self.count / self.per_page))
return 0
@abc.abstractproperty
def get_page(self):
pass
class OffsetPaginator(BasePaginator):
"""Paginator based on offsets and limits. Not performant for large result sets.
"""
page_type = pages.OffsetPage
def get_page(self, page, eager=True):
offset, limit = self.per_page * (page - 1), self.per_page
return self.page_type(self, page, self._fetch(offset, limit, eager=eager))
def _fetch(self, offset, limit, eager=True):
offset += (self.cursor._offset or 0)
if self.cursor._limit:
limit = min(limit, self.cursor._limit - offset)
query = self.cursor.offset(offset).limit(limit)
return query.all() if eager else query
class SeekPaginator(BasePaginator):
"""Paginator using keyset pagination for performance on large result sets.
See http://use-the-index-luke.com/no-offset for details.
"""
page_type = pages.SeekPage
def __init__(self, cursor, per_page, index_column, sort_column=None, count=None):
self.index_column = index_column
self.sort_column = sort_column
super(SeekPaginator, self).__init__(cursor, per_page, count=count)
def get_page(self, last_index=None, sort_index=None, eager=True):
limit = self.per_page
return self.page_type(self, self._fetch(last_index, sort_index, limit, eager=eager))
def _fetch(self, last_index, sort_index=None, limit=None, eager=True):
cursor = self.cursor
direction = self.sort_column[1] if self.sort_column else sa.asc
lhs, rhs = (), ()
if sort_index is not None:
lhs += (self.sort_column, )
rhs += (sort_index, )
if last_index is not None:
lhs += (self.index_column, )
rhs += (last_index, )
lhs = sa.tuple_(*lhs)
rhs = sa.tuple_(*rhs)
if rhs.clauses:
filter = lhs > rhs if direction == sa.asc else lhs < rhs
cursor = cursor.filter(filter)
query = cursor.order_by(direction(self.index_column)).limit(limit)
return query.all() if eager else query
def _get_index_values(self, result):
"""Get index values from last result, to be used in seeking to the next
page. Optionally include sort values, if any.
"""
ret = {'last_index': convert_value(result, self.index_column)}
if self.sort_column:
key = 'last_{0}'.format(self.sort_column[0].key)
ret[key] = convert_value(result, self.sort_column[0])
return ret
| Python | 0 | @@ -553,54 +553,71 @@
elf.
-per_page = per_page%0A self.count = count
+count = count or self._count()%0A self.per_page = per_page
or
@@ -621,24 +621,21 @@
or self.
-_
count
-()
%0A%0A de
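
The SeekPaginator in this record relies on keyset (seek) pagination: rather than OFFSET, it filters on a row-value comparison against the last row already served (sa.tuple_ when a sort column is combined with the unique index column). A stand-alone single-column sketch is below; the session and User names in the commented usage are assumed stand-ins.

import sqlalchemy as sa

def fetch_next_page(query, index_column, last_index=None, per_page=25):
    # Keyset pagination: seek strictly past the last row served, then take one page.
    if last_index is not None:
        query = query.filter(index_column > last_index)
    return query.order_by(sa.asc(index_column)).limit(per_page).all()

# first_page = fetch_next_page(session.query(User), User.id)
# next_page  = fetch_next_page(session.query(User), User.id,
#                              last_index=first_page[-1].id)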
|
9105326cdb6ad7a6d4d23504ef36beb6303eaf65 | make offset_date type unaware | custom/opm/opm_reports/tests/case_reports.py | custom/opm/opm_reports/tests/case_reports.py | from collections import defaultdict
from datetime import datetime, date
from unittest import TestCase
from jsonobject import (JsonObject, DictProperty, DateTimeProperty,
StringProperty, IntegerProperty, BooleanProperty)
from casexml.apps.case.models import CommCareCase
from custom.opm.opm_reports.reports import SharedDataProvider
from dimagi.utils.dates import DateSpan, add_months
from ..beneficiary import OPMCaseRow
class AggressiveDefaultDict(defaultdict):
def __contains__(self, item):
return True
class MockDataProvider(SharedDataProvider):
"""
Mock data provider to manually specify vhnd availability per user
"""
def __init__(self, datespan, vhnd_map=None):
super(MockDataProvider, self).__init__(datespan)
self.vhnd_map = vhnd_map if vhnd_map is not None else AggressiveDefaultDict(lambda: True)
@property
def vhnd_availability(self):
return self.vhnd_map
class Report(JsonObject):
month = IntegerProperty(required=True)
year = IntegerProperty(required=True)
block = StringProperty(required=True)
_data_provider = None
@property
def data_provider(self):
return self._data_provider
@property
def datespan(self):
return DateSpan.from_month(self.month, self.year, inclusive=True)
class Form(JsonObject):
xmlns = StringProperty('something')
form = DictProperty(required=True)
received_on = DateTimeProperty(required=True)
class OPMCase(CommCareCase):
opened_on = DateTimeProperty(datetime(2010, 1, 1))
block_name = StringProperty("Sahora")
type = StringProperty("pregnancy")
closed = BooleanProperty(default=False)
closed_on = DateTimeProperty()
awc_name = StringProperty("Atri")
owner_id = StringProperty("Sahora")
def __init__(self, forms=None, **kwargs):
super(OPMCase, self).__init__(**kwargs)
self._fake_forms = forms if forms is not None else []
def get_forms(self):
return self._fake_forms
class MockCaseRow(OPMCaseRow):
"""
Spoof the following fields to create example cases
"""
def __init__(self, case, report, data_provider=None):
self.case = case
self.report = report
self.report.snapshot = None
self.report.is_rendered_as_email = None
self.report._data_provider = data_provider or MockDataProvider(self.report.datespan)
super(MockCaseRow, self).__init__(case, report)
class OPMCaseReportTestBase(TestCase):
def setUp(self):
self.report_date = date(2014, 6, 1)
self.report_datetime = datetime(2014, 6, 1)
self.report = Report(month=6, year=2014, block="Atri")
def get_relative_edd_from_preg_month(report_date, month):
months_until_edd = 10 - month
new_year, new_month = add_months(report_date.year, report_date.month, months_until_edd)
return type(report_date)(new_year, new_month, 1)
def offset_date(reference_date, offset):
new_year, new_month = add_months(reference_date.year, reference_date.month, offset)
return date(new_year, new_month, 1)
class MockDataTest(OPMCaseReportTestBase):
def test_mock_data(self):
report = Report(month=6, year=2014, block="Atri")
form = Form(form={'foo': 'bar'}, received_on=datetime(2014, 6, 15))
case = OPMCase(
forms=[form],
# add/override any desired case properties here
edd=date(2014, 12, 10),
)
row = MockCaseRow(case, report)
| Python | 0.000047 | @@ -3051,20 +3051,36 @@
return
+type(reference_
date
+)
(new_yea
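
The change above makes offset_date rebuild its result with the class of its input, so a date stays a date and a datetime stays a datetime. A dependency-free sketch of the same helper (without dimagi's add_months) follows; the divmod month arithmetic is standard.

from datetime import date, datetime

def offset_date(reference_date, offset):
    # Month arithmetic first, then rebuild with the same class as the input.
    months = reference_date.year * 12 + (reference_date.month - 1) + offset
    new_year, month_zero_based = divmod(months, 12)
    return type(reference_date)(new_year, month_zero_based + 1, 1)

assert offset_date(date(2014, 6, 1), 3) == date(2014, 9, 1)
assert offset_date(datetime(2014, 11, 1), 2) == datetime(2015, 1, 1)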
|
ba4396f1868dad9a637ddd3cbf9e935fa8d93cf0 | print Exception error | git_downloader.py | git_downloader.py | #!/usr/bin/env python
#
import sys, os, argparse, logging, fnmatch, urllib, posixpath, urlparse, socket
from github import Github
def main(args, loglevel):
logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)
socket.setdefaulttimeout(args.timeout)
g = Github()
with open(args.repo_file, 'r') as f:
file_counter = 0
for line in f.readlines():
logging.info('Fetching repository: %s' % line)
try:
repo_str = line.rstrip().split('github.com/')[-1]
repo = g.get_repo(repo_str)
tree = repo.get_git_tree('master', recursive=True)
files_to_download = []
for file in tree.tree:
if fnmatch.fnmatch(file.path, args.wildcard):
files_to_download.append('https://github.com/%s/raw/master/%s' % (repo_str, file.path))
for file in files_to_download:
logging.info('Downloading %s' % file)
file_counter += 1
filename = posixpath.basename(urlparse.urlsplit(file).path)
output_path = os.path.join(args.output_dir, filename)
if os.path.exists(output_path):
output_path += "-" + str(file_counter)
try:
urllib.urlretrieve(file, output_path)
except:
logging.error('Error downloading %s' % file)
except:
logging.error('Error fetching repository %s' % line)
args.yara_meta = os.path.join(args.output_dir, args.yara_meta)
with open(args.yara_meta, 'w') as f:
for i in os.listdir(args.output_dir):
try:
f.write("include \"" + i + "\"\n")
except:
logging.error('Couldn\'t write to %s' % args.yara_meta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = "Github file downloader")
parser.add_argument("-r",
"--repo_file",
help = "Path for the input file which contains a url of a Github repository for each separate line")
parser.add_argument("-w",
"--wildcard",
help = "Unix shell-style wildcard to match files to download (for example: *.txt)")
parser.add_argument("-o",
"--output_dir",
default = "",
help = "Directory to store all downloaded files")
parser.add_argument("-y",
"--yara-meta",
default = "rules.yara",
help = "Yara meta rule filename to create")
parser.add_argument("-t",
"--timeout",
default = 30,
help = "Socket timeout (seconds)")
parser.add_argument("-v",
"--verbose",
help="increase output verbosity",
action="store_true")
args = parser.parse_args()
# Setup logging
if args.verbose:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
main(args, loglevel) | Python | 0.999548 | @@ -1421,32 +1421,51 @@
except
+ Exception as error
:%0A
@@ -1502,35 +1502,48 @@
downloading
+ %25s.
%25s' %25
+(
file
+, error)
)%0A
@@ -1542,32 +1542,51 @@
except
+ Exception as error
:%0A
@@ -1628,27 +1628,40 @@
pository
+ %25s.
%25s' %25
+(
line
+, error)
)%0A ar
@@ -1894,16 +1894,35 @@
except
+ Exception as error
:%0A
@@ -1964,23 +1964,28 @@
write to
+ %25s:
%25s' %25
+(
args.yar
@@ -1990,16 +1990,24 @@
ara_meta
+, error)
)%0A%0A%0A%0Aif
|
665d2991bbf8ac9d11cd7211ef9123863689adff | remove redundant module sys, add begin/end timestamps | gitmyfeeds_bot.py | gitmyfeeds_bot.py | #!/usr/bin/python
import sys
import json
import httplib
from bs4 import BeautifulSoup
import telegram
import re
import psycopg2
import psycopg2.extras
with open('config.json', 'r') as f:
config = json.load(f)
pg = config['db']['pg_conn']
tg_bot = config['telegram_bot']
""" connect to postgres """
conn = psycopg2.connect("host=%s port=%s dbname=%s user=%s password=%s"\
% (pg['host'], pg['port'], pg['dbname'], pg['user'], pg['pass'])\
, cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor()
""" now use test user only """
USER_ID = 1 # TODO: implement multi-user delivering
cur.execute((
"select uat.token, gu.username "
"from users_atom_tokens uat "
" join github_users gu on gu.user_id = uat.user_id "
"where uat.user_id = %s")% USER_ID)
atom = cur.fetchone()
""" get feeds for one test user """
c = httplib.HTTPSConnection('github.com')
c.request('GET', '/%s.private.atom?token=%s'% (atom['username'], atom['token']))
response = c.getresponse()
data = response.read()
soup = BeautifulSoup(data, 'html.parser')
""" prepare regexp to get event and entry id from xml """
entry_id_pat = re.compile(r".*:(\w+)/(\d+)$")
""" parse entries data and save it in the db """
for entry in soup.find_all('entry'):
published = entry.published.get_text()
entry_title = entry.title.get_text()
published = entry.published.get_text()
link = entry.link['href']
title = entry.title.get_text()
author_raw = entry.author
author_names = author_raw.find_all('name')
author = None
if len(author_names):
author = author_names[0].get_text()
entry_id_raw = entry.id.get_text()
parse_id_res = entry_id_pat.match(entry_id_raw)
if parse_id_res is None:
print "notice: could not recognize entry id by pattern. skip"
continue
event = parse_id_res.group(1)
entry_id = parse_id_res.group(2)
entry_text = None
entry_text_raw = entry.content.get_text()
soup_entry = BeautifulSoup(entry_text_raw, 'html.parser')
quote = soup_entry.blockquote
if quote is not None:
entry_text = quote.get_text().strip()
cur.execute("insert into feeds_private("\
"user_id, event, entry_id, published, title, author, content, link"\
") values(%s, %s, %s, %s, %s, %s, %s, %s) "
"on conflict (entry_id) do nothing",\
(USER_ID, event, entry_id, published, title, author, entry_text, link))
conn.commit()
""" prepare telegram bot """
bot = telegram.Bot(token = tg_bot['token'])
""" for all active chats send new feeds """
cur_feeds = conn.cursor()
cur_feeds.execute((
"select fp.id, fp.title, fp.link, fp.content "
" , to_char(fp.published, 'dd.mm.yy hh24:mi') dt "
"from feeds_private fp "
" left join feeds_sent fs "
" on fp.id = fs.feed_private_id and fs.user_id = %s "
"where fs.id is null "
"order by fp.published asc "
"limit %s ") % (USER_ID, tg_bot['send_feeds_limit'])
)
cur_upd = conn.cursor()
cur.execute("select chat_id from chats_to_send where active = true")
for chat in cur:
for feed in cur_feeds:
""" prepare message to send """
msg = "*%s* [%s](%s)"% (feed['dt'], feed['title'], feed['link'])
if not feed['content'] is None:
msg += "\n_%s_"% feed['content']
""" send it """
bot.sendMessage(chat_id = chat['chat_id']\
, text = msg\
, parse_mode = 'Markdown'\
, disable_web_page_preview = True)
""" mark as read to skip it next time """
cur_upd.execute((
"insert into feeds_sent(feed_private_id, user_id) "
"values(%s, %s)")% (feed['id'], USER_ID))
conn.commit()
cur_feeds.close()
cur_upd.close()
cur.close()
conn.close()
| Python | 0.000001 | @@ -23,11 +23,12 @@
ort
-sys
+time
%0Aimp
@@ -272,16 +272,75 @@
_bot'%5D%0A%0A
+print '%5B%25s%5D start...'%25 time.strftime('%25d.%25m.%25Y %25H:%25M:%25S')%0A%0A
%22%22%22 conn
@@ -3204,16 +3204,96 @@
_feeds:%0A
+ print 'send feed item %5B%25s%5D to chat %5B%25s%5D'%25 (feed%5B'id'%5D, chat%5B'chat_id'%5D)%0A
@@ -3952,16 +3952,74 @@
)%0A%0Aconn.close()%0A
+%0Aprint '%5B%25s%5D finish.'%25 time.strftime('%25d.%25m.%25Y %25H:%25M:%25S')%0A
|
cadf61287b9b68af5b734b4ab2fefd9c758cfc26 | Update skiptest | data_importer/tests/test_generic_importer.py | data_importer/tests/test_generic_importer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import mock
import os
import django
from django.test import TestCase
from unittest import skipIf
from data_importer.importers.generic import GenericImporter
from data_importer.readers.xls_reader import XLSReader
from data_importer.readers.xlsx_reader import XLSXReader
from data_importer.readers.csv_reader import CSVReader
from data_importer.readers.xml_reader import XMLReader
from data_importer.core.exceptions import UnsuportedFile
from data_importer.models import FileHistory
LOCAL_DIR = os.path.dirname(__file__)
class TestGeneralImpoerterSetup(TestCase):
def setUp(self):
self.xls_file = os.path.join(LOCAL_DIR, 'data/test.xls')
self.xlsx_file = os.path.join(LOCAL_DIR, 'data/test.xlsx')
self.csv_file = os.path.join(LOCAL_DIR, 'data/test.csv')
self.xml_file = os.path.join(LOCAL_DIR, 'data/test.xml')
self.unsuported_file = os.path.join(LOCAL_DIR, 'data/test_json_descriptor.json')
def test_xls_reader_set(self):
importer = GenericImporter(source=self.xls_file)
self.assertEquals(importer.get_reader_class(), XLSReader)
def test_xlsx_reader_set(self):
importer = GenericImporter(source=self.xlsx_file)
self.assertEquals(importer.get_reader_class(), XLSXReader)
def test_csv_reader_set(self):
importer = GenericImporter(source=self.csv_file)
self.assertEquals(importer.get_reader_class(), CSVReader)
def test_xml_reader_set(self):
importer = GenericImporter(source=self.xml_file)
self.assertEquals(importer.get_reader_class(), XMLReader)
def test_getting_source_file_extension(self):
importer = GenericImporter(source=self.csv_file)
self.assertEquals(importer.get_source_file_extension(), 'csv')
@skipIf((django.get_version() < '1.4'), "not supported in this library version")
def test_unsuported_raise_error_message(self):
with self.assertRaisesMessage(UnsuportedFile, 'Unsuported File'):
GenericImporter(source=self.unsuported_file)
def test_import_with_file_instance(self):
file_instance = open(self.csv_file)
importer = GenericImporter(source=file_instance)
self.assertEquals(importer.get_source_file_extension(), 'csv')
def test_import_with_model_instance(self):
file_mock = mock.MagicMock(spec=FileHistory, name='FileHistoryMock')
file_mock.file_upload = '/media/test.csv'
importer = GenericImporter(source=file_mock)
self.assertEquals(importer.get_source_file_extension(), 'csv')
| Python | 0.000001 | @@ -1812,37 +1812,30 @@
pIf(
-(
django.
-get_version() %3C '1.4'
+VERSION %3C (1, 4
), %22
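
The skipIf change in this record swaps a version-string comparison for Django's VERSION tuple. The reason, sketched below, is that string comparison orders versions lexicographically, so '1.10' sorts before '1.4'.

# Lexicographic string comparison mis-orders versions; tuples compare correctly.
print('1.10' < '1.4')      # True  -- wrong for version numbers
print((1, 10) < (1, 4))    # False -- tuple comparison gets it right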
|
6e7a20675cd66d9ca7d4a286958404198369dece | Validate ReplicationTopology data | dbaas/physical/forms/replication_topology.py | dbaas/physical/forms/replication_topology.py | from django import forms
from django.forms.widgets import SelectMultiple
#from django.forms.widgets import CheckboxSelectMultiple
from ..models import ReplicationTopology, Parameter
class ReplicationTopologyForm(forms.ModelForm):
class Meta:
model = ReplicationTopology
def __init__(self, *args, **kwargs):
super(ReplicationTopologyForm, self).__init__(*args, **kwargs)
self.fields["parameter"].widget = SelectMultiple()
#self.fields["parameter"].widget = CheckboxSelectMultiple()
self.fields["parameter"].queryset = Parameter.objects.all()
self.fields["parameter"].help_text = 'Select the parameters that can be changed in this topology'
| Python | 0.000001 | @@ -1,16 +1,40 @@
+# -*- coding: utf-8 -*-%0A
from django impo
@@ -198,16 +198,40 @@
arameter
+, DatabaseInfraParameter
%0A%0A%0Aclass
@@ -635,16 +635,16 @@
s.all()%0A
-
@@ -741,8 +741,1119 @@
pology'%0A
+%0A def clean(self):%0A cleaned_data = super(ReplicationTopologyForm, self).clean()%0A%0A if self.instance.id and 'parameter' in self.changed_data:%0A form_parameters = cleaned_data.get(%22parameter%22)%0A topology_parameters = Parameter.objects.filter(%0A replication_topologies=self.instance%0A )%0A for topology_parameter in topology_parameters:%0A if topology_parameter not in form_parameters:%0A parametersinfra = DatabaseInfraParameter.objects.filter(%0A parameter=topology_parameter,%0A databaseinfra__plan__replication_topology=self.instance%0A )%0A if parametersinfra:%0A parameterinfra = parametersinfra%5B0%5D%0A msg = %22The parameter %7B%7D can not be deleted. It has been set in the databaseinfra %7B%7D.%22.format(%0A parameterinfra.parameter, parameterinfra.databaseinfra%0A )%0A raise forms.ValidationError(msg)%0A%0A return cleaned_data%0A
|
12ca4bef094b03fa5a049fc38739bb24ebdde5b7 | use constants | corehq/apps/userreports/reports/builder/__init__.py | corehq/apps/userreports/reports/builder/__init__.py | from __future__ import absolute_import
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
import six
DEFAULT_CASE_PROPERTY_DATATYPES = {
"name": "string",
"modified_on": "datetime",
"opened_on": "datetime",
"owner_id": "string",
"user_id": "string",
}
FORM_QUESTION_DATATYPE_MAP = {
"Select": "single",
"MSelect": "multiple"
}
FORM_METADATA_PROPERTIES = [
('username', 'Text'),
('userID', 'Text'),
('timeStart', 'DateTime'),
('timeEnd', 'DateTime'),
('deviceID', 'Text'),
]
DATA_SOURCE_TYPE_CHOICES = (
("case", _("Cases")),
("form", _("Forms")),
)
def make_case_data_source_filter(case_type):
return {
"type": "boolean_expression",
"operator": "eq",
"expression": {
"type": "property_name",
"property_name": "type"
},
"property_value": case_type,
}
def make_form_data_source_filter(xmlns):
return {
"type": "boolean_expression",
"operator": "eq",
"expression": {
"type": "property_name",
"property_name": "xmlns"
},
"property_value": xmlns,
}
def make_case_property_indicator(property_name, column_id=None, datatype=None):
"""
Return a data source indicator configuration (a dict) for the given case
property. This will expand index case references if provided in the format
parent/host/foo
"""
datatype = datatype or DEFAULT_CASE_PROPERTY_DATATYPES.get(property_name, "string")
parts = property_name.split('/')
root_field = parts.pop()
expression = {
'type': 'property_name',
'property_name': root_field,
}
if parts:
case_expression = {
'type': 'identity',
}
for index in parts:
case_expression = {
'type': 'indexed_case',
'case_expression': case_expression,
'index': index
}
expression = {
'type': 'nested',
'argument_expression': case_expression,
'value_expression': expression
}
return {
"type": "expression",
"column_id": column_id or property_name,
"datatype": datatype,
"display_name": property_name,
"expression": expression,
}
def make_form_question_indicator(question, column_id=None, data_type=None, root_doc=False):
"""
Return a data source indicator configuration (a dict) for the given form
question.
"""
path = question['value'].split('/')
expression = {
"type": "property_path",
'property_path': ['form'] + path[2:],
}
if root_doc:
expression = {"type": "root_doc", "expression": expression}
return {
"type": "expression",
"column_id": column_id or question['value'],
"display_name": path[-1],
"datatype": data_type or get_form_indicator_data_type(question['type']),
"expression": expression
}
def make_multiselect_question_indicator(question, column_id=None):
path = question['value'].split('/')
return {
"type": "choice_list",
"column_id": column_id or question['value'],
"display_name": path[-1],
"property_path": ['form'] + path[2:],
"select_style": "multiple",
"choices": [o['value'] for o in question['options']],
}
def make_form_meta_block_indicator(spec, column_id=None, root_doc=False):
"""
Return a data source indicator configuration (a dict) for the given
form meta field and data type.
"""
field_name = spec[0]
if isinstance(field_name, six.string_types):
field_name = [field_name]
data_type = spec[1]
column_id = column_id or field_name[0]
expression = {
"type": "property_path",
"property_path": ['form', 'meta'] + field_name,
}
if root_doc:
expression = {"type": "root_doc", "expression": expression}
return {
"type": "expression",
"column_id": column_id,
"display_name": field_name[0],
"datatype": get_form_indicator_data_type(data_type),
"expression": expression
}
def get_form_indicator_data_type(question_type):
return {
"date": "date",
"datetime": "datetime",
"Date": "date",
"DateTime": "datetime",
"Int": "integer",
"Double": "decimal",
"Text": "string",
"string": "string",
}.get(question_type, "string")
def get_filter_format_from_question_type(question_type):
return {
"Date": 'date',
"DateTime": "date",
"Text": "dynamic_choice_list",
"Int": "numeric",
"Double": "numeric",
}.get(question_type, "dynamic_choice_list")
| Python | 0.00002 | @@ -567,75 +567,166 @@
%0A%5D%0A%0A
-%0ADATA_SOURCE_TYPE_CHOICES = (%0A (%22case%22, _(%22Cases%22)),%0A (%22form%22
+DATA_SOURCE_TYPE_CASE = 'case'%0ADATA_SOURCE_TYPE_FORM = 'form'%0ADATA_SOURCE_TYPE_CHOICES = (%0A (DATA_SOURCE_TYPE_CASE, _(%22Cases%22)),%0A (DATA_SOURCE_TYPE_FORM
, _(
|
1820001e6ec6960014b5e9cf23eb7a2f8b90c213 | Remove a broken test case from decorators_test | dm_control/mujoco/testing/decorators_test.py | dm_control/mujoco/testing/decorators_test.py | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests of the decorators module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Internal dependencies.
from absl.testing import absltest
from absl.testing import parameterized
from dm_control.mujoco.testing import decorators
import mock
from six.moves import xrange # pylint: disable=redefined-builtin
class RunThreadedTest(absltest.TestCase):
@mock.patch(decorators.__name__ + ".threading")
def test_number_of_threads(self, mock_threading):
num_threads = 5
mock_threads = [mock.MagicMock() for _ in xrange(num_threads)]
for thread in mock_threads:
thread.start = mock.MagicMock()
thread.join = mock.MagicMock()
mock_threading.Thread = mock.MagicMock(side_effect=mock_threads)
test_decorator = decorators.run_threaded(num_threads=num_threads)
tested_method = mock.MagicMock()
tested_method.__name__ = "foo"
test_runner = test_decorator(tested_method)
test_runner(self)
for thread in mock_threads:
thread.start.assert_called_once()
thread.join.assert_called_once()
def test_number_of_iterations(self):
calls_per_thread = 5
tested_method = mock.MagicMock()
tested_method.__name__ = "foo"
test_decorator = decorators.run_threaded(
num_threads=1, calls_per_thread=calls_per_thread)
test_runner = test_decorator(tested_method)
test_runner(self)
self.assertEqual(calls_per_thread, tested_method.call_count)
def test_works_with_named_parameters(self):
func = mock.MagicMock()
names = ["foo", "bar", "baz"]
params = [1, 2, 3]
calls_per_thread = 2
num_threads = 4
class FakeTest(parameterized.TestCase):
@parameterized.named_parameters(zip(names, params))
@decorators.run_threaded(calls_per_thread=calls_per_thread,
num_threads=num_threads)
def test_method(self, param):
func(param)
suite = absltest.TestLoader().loadTestsFromTestCase(FakeTest)
suite.debug() # Run tests without collecting the output.
expected_call_count = len(params) * calls_per_thread * num_threads
self.assertEqual(func.call_count, expected_call_count)
actual_params = {call[0][0] for call in func.call_args_list}
self.assertSetEqual(set(params), actual_params)
if __name__ == "__main__":
absltest.main()
| Python | 0.000015 | @@ -872,47 +872,8 @@
test
-%0Afrom absl.testing import parameterized
%0A%0Afr
@@ -2117,846 +2117,8 @@
t)%0A%0A
- def test_works_with_named_parameters(self):%0A%0A func = mock.MagicMock()%0A names = %5B%22foo%22, %22bar%22, %22baz%22%5D%0A params = %5B1, 2, 3%5D%0A calls_per_thread = 2%0A num_threads = 4%0A%0A class FakeTest(parameterized.TestCase):%0A%0A @parameterized.named_parameters(zip(names, params))%0A @decorators.run_threaded(calls_per_thread=calls_per_thread,%0A num_threads=num_threads)%0A def test_method(self, param):%0A func(param)%0A%0A suite = absltest.TestLoader().loadTestsFromTestCase(FakeTest)%0A suite.debug() # Run tests without collecting the output.%0A%0A expected_call_count = len(params) * calls_per_thread * num_threads%0A%0A self.assertEqual(func.call_count, expected_call_count)%0A actual_params = %7Bcall%5B0%5D%5B0%5D for call in func.call_args_list%7D%0A self.assertSetEqual(set(params), actual_params)%0A%0A
%0Aif
|
b58d296373ed4ba75d0e6409e332e70abea76086 | add more axes labels | data/boada/analysis_all/redshifts/redshift_stats.py | data/boada/analysis_all/redshifts/redshift_stats.py | import pandas as pd
import pylab as pyl
from glob import glob
files = glob('*.csv')
for f in files:
results = pd.read_csv(f)
# good redshifts
try:
q0 = pyl.append(q0, results[results.Q == 0].r.values)
q1 = pyl.append(q1, results[results.Q == 1].r.values)
x = ~pyl.isnan(results.fiber) & pyl.isnan(results.Q)
q2 = pyl.append(q2, results.r.values[x.values])
except NameError:
q0 = results[results.Q==0].r.values
q1 = results[results.Q==1].r.values
x = ~pyl.isnan(results.fiber) & pyl.isnan(results.Q)
q2 = results.r.values[x.values]
bins = pyl.linspace(14,22,15)
pyl.hist(q2, weights=pyl.zeros_like(q2)+1./q2.size, histtype='step', bins=bins,
lw=2, label='Q=2')
pyl.hist(q1, weights=pyl.zeros_like(q1)+1./q1.size, histtype='step', bins=bins,
lw=2, label='Q=1')
q0 = q0[~pyl.isnan(q0)]
pyl.hist(q0, weights=pyl.zeros_like(q0)+1./q0.size, histtype='step', bins=bins,
lw=2, label='Q=0')
pyl.legend(loc='upper right')
pyl.gca().invert_xaxis()
pyl.ylim(0,0.5)
pyl.xlabel('$m_r$')
| Python | 0 | @@ -607,16 +607,112 @@
alues%5D%0A%0A
+# make a figure%0Af = pyl.figure(1,figsize=(5,5*(pyl.sqrt(5.)-1.0)/2.0))%0Aax = f.add_subplot(111)%0A%0A
bins = p
@@ -733,19 +733,18 @@
,22,15)%0A
-pyl
+ax
.hist(q2
@@ -839,19 +839,18 @@
='Q=2')%0A
-pyl
+ax
.hist(q1
@@ -969,19 +969,18 @@
an(q0)%5D%0A
-pyl
+ax
.hist(q0
@@ -1076,19 +1076,18 @@
'Q=0')%0A%0A
-pyl
+ax
.legend(
@@ -1109,17 +1109,10 @@
t')%0A
-pyl.gca()
+ax
.inv
@@ -1124,20 +1124,23 @@
axis()%0A%0A
-pyl.
+ax.set_
ylim(0,0
@@ -1143,20 +1143,23 @@
(0,0.5)%0A
-pyl.
+ax.set_
xlabel('
@@ -1166,9 +1166,54 @@
$m_r$')%0A
+ax.set_ylabel('Fraction of Total')%0Apyl.show()
%0A
|
b8399e48872271ccac6431d9f875238ff509a03a | Increment number of JS files in test_js_load | InvenTree/InvenTree/test_views.py | InvenTree/InvenTree/test_views.py | """
Unit tests for the main web views
"""
import re
import os
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
class ViewTests(TestCase):
""" Tests for various top-level views """
username = 'test_user'
password = 'test_pass'
def setUp(self):
# Create a user
self.user = get_user_model().objects.create_user(self.username, '[email protected]', self.password)
self.user.set_password(self.password)
self.user.save()
result = self.client.login(username=self.username, password=self.password)
self.assertEqual(result, True)
def test_api_doc(self):
""" Test that the api-doc view works """
api_url = os.path.join(reverse('index'), 'api-doc') + '/'
response = self.client.get(api_url)
self.assertEqual(response.status_code, 200)
def test_index_redirect(self):
"""
top-level URL should redirect to "index" page
"""
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
def get_index_page(self):
"""
Retrieve the index page (used for subsequent unit tests)
"""
response = self.client.get("/index/")
self.assertEqual(response.status_code, 200)
return str(response.content.decode())
def test_panels(self):
"""
Test that the required 'panels' are present
"""
content = self.get_index_page()
self.assertIn("<div id='detail-panels'>", content)
# TODO: In future, run the javascript and ensure that the panels get created!
def test_js_load(self):
"""
Test that the required javascript files are loaded correctly
"""
# Change this number as more javascript files are added to the index page
N_SCRIPT_FILES = 39
content = self.get_index_page()
# Extract all required javascript files from the index page content
script_files = re.findall("<script type='text\\/javascript' src=\"([^\"]*)\"><\\/script>", content)
self.assertEqual(len(script_files), N_SCRIPT_FILES)
# TODO: Request the javascript files from the server, and ensure they are correcty loaded
| Python | 0.000003 | @@ -1889,10 +1889,10 @@
S =
-39
+40
%0A%0A
|
83b6e177fccaef7d62682c25a0e82f29bcba01e6 | Remove autofilling "GSSAPI" mechanism in hue.ini | desktop/core/src/desktop/lib/mapr_config_changer.py | desktop/core/src/desktop/lib/mapr_config_changer.py | import re
import os
GSSAPI = "GSSAPI"
MAPR_SECURITY = "MAPR-SECURITY"
KERBEROS_ENABLE = "kerberosEnable"
SECURE = "secure"
SECURITY_ENABLED = 'security_enabled'
MECHANISM = 'mechanism'
MAPR_CLUSTERS_CONF_PATH = "/opt/mapr/conf/mapr-clusters.conf"
templates = {
MECHANISM: 'none',
SECURITY_ENABLED: 'false'
}
def read_values_from_mapr_clusters_conf():
if not os.path.exists(MAPR_CLUSTERS_CONF_PATH):
return
mapr_clusters_conf = open(MAPR_CLUSTERS_CONF_PATH, "r").read()
cluster_props = dict(re.findall(r'(\S+)=(".*?"|\S+)', mapr_clusters_conf))
templates[SECURITY_ENABLED] = cluster_props[SECURE] if SECURE in cluster_props else "false"
if templates[SECURITY_ENABLED] == "true":
templates[MECHANISM] = MAPR_SECURITY
if (KERBEROS_ENABLE in cluster_props) and (cluster_props[KERBEROS_ENABLE] == "true"):
templates[MECHANISM] = GSSAPI
templateRegEx = re.compile(r'^\${(.+?)}')
def change_config(config):
for key in config:
if isinstance(config[key], dict):
change_config(config[key])
elif type(config[key]) == str:
match = templateRegEx.search(config[key])
if (match != None) and (match.group(1) in templates):
config[key] = templates[match.group(1)]
return config
def fill_templates(config):
read_values_from_mapr_clusters_conf()
change_config(config)
return config
| Python | 0 | @@ -18,26 +18,8 @@
os%0A%0A
-GSSAPI = %22GSSAPI%22%0A
MAPR
@@ -50,43 +50,8 @@
TY%22%0A
-KERBEROS_ENABLE = %22kerberosEnable%22%0A
SECU
@@ -689,131 +689,8 @@
TY%0A%0A
- if (KERBEROS_ENABLE in cluster_props) and (cluster_props%5BKERBEROS_ENABLE%5D == %22true%22):%0A templates%5BMECHANISM%5D = GSSAPI%0A%0A
%0Atem
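
The record above reads mapr-clusters.conf by pulling key=value pairs with a single re.findall and building a dict from the two capture groups. A stand-alone sketch on an assumed one-line sample is below.

import re

sample = 'my.cluster.com secure=true cldbs=node1:7222'   # assumed sample line
props = dict(re.findall(r'(\S+)=(".*?"|\S+)', sample))
print(props.get('secure', 'false'))                       # -> 'true'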
|
13bf2bcbbfd079c75b84a993a86086493d2e6dee | Cleaned up output #2 | django_postgres/management/commands/sync_pgviews.py | django_postgres/management/commands/sync_pgviews.py | """Syncronise SQL Views.
"""
from django.core.management.base import BaseCommand
from django.db import models
from django_postgres.view import create_views
class Command(BaseCommand):
args = '<appname appname ...>'
help = 'Creates and Updates all SQL Views'
def handle(self, *args, **options):
"""Run the create_views command.
"""
self.stdout.write('Creating Views for {modules}'.format(modules=args))
if args:
for module in args:
create_views(module)
else:
self.handle_noargs(**options)
def handle_noargs(self, **options):
all_modules = models.get_apps()
self.stdout.write(
'Creating Views for {modules}'.format(modules=all_modules))
for module in all_modules:
create_views(module)
| Python | 0.998498 | @@ -356,16 +356,37 @@
%22%22%22%0A
+ if args:%0A
@@ -403,16 +403,33 @@
t.write(
+%0A
'Creatin
@@ -473,33 +473,16 @@
=args))%0A
- if args:%0A
@@ -602,13 +602,9 @@
ns)%0A
-
%0A
+
@@ -729,32 +729,45 @@
eating Views for
+ all modules:
%7Bmodules%7D'.form
@@ -761,32 +761,49 @@
odules%7D'.format(
+%0A
modules=all_modu
@@ -805,17 +805,39 @@
_modules
-)
+%0A )%0A
)%0A
|
0b5c75a9222fa399b8db45f67bfc514e7fc226f6 | fix typo | Lib/glyphsLib/builder/features.py | Lib/glyphsLib/builder/features.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
from fontTools.misc.py23 import round, unicode
import glyphsLib
from .constants import GLYPHLIB_PREFIX, PUBLIC_PREFIX
def autostr(automatic):
return '# automatic\n' if automatic else ''
def to_ufo_features(self, ufo):
"""Write an UFO's OpenType feature file."""
prefix_str = '\n\n'.join('# Prefix: %s\n%s%s' %
(prefix.name, autostr(prefix.automatic),
prefix.code.strip())
for prefix in self.font.featurePrefixes)
class_defs = []
for class_ in self.font.classes:
prefix = '@' if not class_.name.startswith('@') else ''
name = prefix + class_.name
class_defs.append('%s%s = [ %s ];' % (autostr(class_.automatic), name,
class_.code))
class_str = '\n\n'.join(class_defs)
feature_defs = []
for feature in self.font.features:
code = feature.code.strip()
lines = ['feature %s {' % name]
if feature.notes:
lines.append('# notes:')
lines.extend('# ' + line for line in feature.notes.splitlines())
if feature.automatic:
lines.append('# automatic')
if feature.disabled:
lines.append('# disabled')
lines.extend('#' + line for line in code.splitlines())
else:
lines.append(code)
lines.append('} %s;' % feature.name)
feature_defs.append('\n'.join(lines))
fea_str = '\n\n'.join(feature_defs)
gdef_str = _build_gdef(ufo)
# make sure feature text is a unicode string, for defcon
full_text = '\n\n'.join(
filter(None, [prefix_str, class_str, fea_str, gdef_str])) + '\n'
ufo.features.text = full_text if full_text.strip() else ''
def _build_gdef(ufo):
"""Build a table GDEF statement for ligature carets."""
from glyphsLib import glyphdata # Expensive import
bases, ligatures, marks, carets = set(), set(), set(), {}
category_key = GLYPHLIB_PREFIX + 'category'
subCategory_key = GLYPHLIB_PREFIX + 'subCategory'
for glyph in ufo:
has_attaching_anchor = False
for anchor in glyph.anchors:
name = anchor.name
if name and not name.startswith('_'):
has_attaching_anchor = True
if name and name.startswith('caret_') and 'x' in anchor:
carets.setdefault(glyph.name, []).append(round(anchor['x']))
lib = glyph.lib
glyphinfo = glyphdata.get_glyph(glyph.name)
# first check glyph.lib for category/subCategory overrides; else use
# global values from GlyphData
category = lib.get(category_key)
if category is None:
category = glyphinfo.category
subCategory = lib.get(subCategory_key)
if subCategory is None:
subCategory = glyphinfo.subCategory
# Glyphs.app assigns glyph classes like this:
#
# * Base: any glyph that has an attaching anchor
# (such as "top"; "_top" does not count) and is neither
# classified as Ligature nor Mark using the definitions below;
#
# * Ligature: if subCategory is "Ligature" and the glyph has
# at least one attaching anchor;
#
# * Mark: if category is "Mark" and subCategory is either
# "Nonspacing" or "Spacing Combining";
#
# * Compound: never assigned by Glyphs.app.
#
# https://github.com/googlei18n/glyphsLib/issues/85
# https://github.com/googlei18n/glyphsLib/pull/100#issuecomment-275430289
if subCategory == 'Ligature' and has_attaching_anchor:
ligatures.add(glyph.name)
elif category == 'Mark' and (subCategory == 'Nonspacing' or
subCategory == 'Spacing Combining'):
marks.add(glyph.name)
elif has_attaching_anchor:
bases.add(glyph.name)
if not any((bases, ligatures, marks, carets)):
return None
lines = ['table GDEF {', ' # automatic']
glyphOrder = ufo.lib[PUBLIC_PREFIX + 'glyphOrder']
glyphIndex = lambda glyph: glyphOrder.index(glyph)
fmt = lambda g: ('[%s]' % ' '.join(sorted(g, key=glyphIndex))) if g else ''
lines.extend([
' GlyphClassDef',
' %s, # Base' % fmt(bases),
' %s, # Liga' % fmt(ligatures),
' %s, # Mark' % fmt(marks),
' ;'])
for glyph, caretPos in sorted(carets.items()):
lines.append(' LigatureCaretByPos %s %s;' %
(glyph, ' '.join(unicode(p) for p in sorted(caretPos))))
lines.append('} GDEF;')
return '\n'.join(lines)
| Python | 0.000022 | @@ -1688,16 +1688,24 @@
%25s %7B' %25
+feature.
name%5D%0A
|
737adf50f39aafd59e56f47c0ede31c214530959 | Fix NaN handling | drivendata_validator/drivendata_validator.py | drivendata_validator/drivendata_validator.py | #!/usr/bin/python2
import sys
import json
import pandas as pd
import numpy as np
class DrivenDataValidationError(Exception):
""" Custom Exception class for validation errors that we can anticipate. These messages
are returned to the user. Other unanticipated exceptions get a generic message we pass to the user.
"""
def __init__(self, message, errors=None):
# Call the base class constructor with the parameters it needs
super(DrivenDataValidationError, self).__init__(message)
self.errors = errors
class DrivenDataValidator(object):
""" Validator class.
Accepts a dictionary that is passed to pandas.read_csv -- for options see:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.io.parsers.read_csv.html
"""
def __init__(self, **read_csv_kwargs):
self.validation_kwargs = read_csv_kwargs
# default validation kwargs for pd.read_csv() if read_csv_kwargs is empty
if not self.validation_kwargs:
self.validation_kwargs = {
"index_col": 0,
"skipinitialspace": True
}
def validate(self, format_path, submission_path):
""" Validates that a submission is in the proper format
:param format_path: a string that is the path to the submission format file
:param submission_path: a string that is the path to the actual submission to validate
:return: The data frame for the submission if we pass
"""
# load the data
format_df = pd.read_csv(format_path, **self.validation_kwargs)
submission_df = pd.read_csv(submission_path, **self.validation_kwargs)
# verify that the headers match
if np.any(format_df.columns.values != submission_df.columns.values):
error_str = 'CSV Headers do not match. Submission requires that first line is: "{}" You submitted: "{}" '
# get all of the headers
format_headers = [format_df.index.name if format_df.index.name else ""] + \
format_df.columns.values.tolist()
sub_headers = [submission_df.index.name if submission_df.index.name else ""] + \
submission_df.columns.values.tolist()
raise DrivenDataValidationError(error_str.format(",".join(format_headers),
",".join(sub_headers)))
# verify the submission has the proper number of rows
if len(format_df.index) != len(submission_df.index):
error_str = 'Submission has {} rows but should have {}.'
raise DrivenDataValidationError(error_str.format(len(submission_df.index),
len(format_df.index)))
# verify the submission has the right row ids
if np.any(format_df.index.values != submission_df.index.values):
error_str = 'IDs for submission are not correct.'
raise DrivenDataValidationError(error_str)
# verify that the dtypes parse properly
if np.any(format_df.dtypes != submission_df.dtypes):
error_str = "Unexpected data types in submission. " \
"\n Expected dtypes: \t'{}' \n Submitted dtypes: \t'{}'"
raise DrivenDataValidationError(error_str.format(format_df.dtypes.values.tolist(),
submission_df.dtypes.values.tolist()))
# verify that there are no nans if we don't expect any nans
if np.isnan(submission_df.values).any() and not np.isnan(format_df.values).any():
error_str = 'Your submission contains NaNs or blanks, which are not expected. Please change these to ' \
'numeric predictions. See ids: {}'
# figure out which rows contain nans
nan_mask = np.isnan(submission_df.values).astype(int).sum(axis=1).astype(bool)
nan_ids = submission_df.index.values[nan_mask]
raise DrivenDataValidationError(error_str.format(nan_ids.tolist()))
return submission_df
def is_valid(self, format_path, submission_path, print_errors=False):
""" A wrapper around validate to return True/False
"""
try:
self.validate(format_path, submission_path)
return True
except Exception as e:
if print_errors:
print e.message
return False
def main():
# args are submission format, submission file, [optional] kwargs_json
if len(sys.argv) not in [3, 4]:
print "Usage: python DrivenDataValidator.py <path_to_submission_format_file> " \
"<path_to_your_submission_file> [<path_to_pandas_read_csv_kwargs_json>]"
else:
print "Checking all of your ducks to see if they are in a row...\n"
read_csv_kwargs = {}
if len(sys.argv) == 4:
with open(sys.argv[3], "r") as json_file:
read_csv_kwargs = json.load(json_file)
try:
validator = DrivenDataValidator(**read_csv_kwargs)
validator.validate(sys.argv[1], sys.argv[2])
print "Nice work, amig[a|o] Your submission is valid. Submit it on www.drivendata.org!"
except DrivenDataValidationError as anticipated_error:
print "Caught anticipated error. Fix the below and retry."
print "--------------------------------------------------"
print anticipated_error.message
except Exception as e:
print "Unanticipated error. What have you done??"
print "-----------------------------------------"
# re-raise so y'all can read the trace
raise
if __name__ == "__main__": # pragma: no cover (NB: tested with subprocess, so we don't see the coverage)
main() | Python | 0 | @@ -1184,19 +1184,50 @@
ion_path
+, skip_validating_dataset=False
):%0A
-
@@ -1636,86 +1636,572 @@
gs)%0A
- submission_df = pd.read_csv(submission_path, **self.validation_kwargs)
+%0A # automatically validate and return the dataframe if we're comparing something to itself.%0A # Refs:%0A # - Aristotle's law of identity, 'Metaphysics', 1st century CE%0A # - %22A is A.%22 - 'Atlas Shrugged', Ayn Rand, 1957 %3C- lulz%0A #%0A if format_path == submission_path:%0A return format_df%0A%0A submission_df = pd.read_csv(submission_path, **self.validation_kwargs)%0A%0A # just return the unadulterated df if we know this is what we're after%0A if skip_validating_dataset:%0A return submission_df
%0A%0A
@@ -4078,24 +4078,55 @@
ect any nans
+ (pd.isnull handles all dtypes)
%0A if
@@ -4125,24 +4125,25 @@
if
-n
p
+d
.isn
-an
+ull
(submiss
@@ -4171,24 +4171,25 @@
and not
-n
p
+d
.isn
-an
+ull
(format_
@@ -4459,16 +4459,17 @@
k =
-n
p
+d
.isn
-an
+ull
(sub
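
The swap from np.isnan to pd.isnull in this record matters because np.isnan raises a TypeError on object-dtype arrays (any frame with string columns), while pd.isnull handles every dtype. The tiny frame below is illustrative only.

import numpy as np
import pandas as pd

frame = pd.DataFrame({'label': ['a', 'b'], 'score': [0.5, np.nan]})
values = frame.values                    # object dtype because of the strings

print(pd.isnull(values).any())           # True: works on mixed dtypes
try:
    np.isnan(values)
except TypeError as exc:                 # np.isnan cannot handle object arrays
    print('np.isnan failed: %s' % exc)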
|
a44d2a9239e755b9e5726521b11aca9734b89180 | Fix crashing when Authorization is not set | ereuse_devicehub/resources/account/domain.py | ereuse_devicehub/resources/account/domain.py | import base64
from bson.objectid import ObjectId
from ereuse_devicehub.exceptions import WrongCredentials, BasicError, StandardError
from ereuse_devicehub.resources.account.role import Role
from ereuse_devicehub.resources.account.settings import AccountSettings
from ereuse_devicehub.resources.domain import Domain, ResourceNotFound
from ereuse_devicehub.utils import ClassProperty
from flask import current_app
from flask import g
from flask import request
from passlib.handlers.sha2_crypt import sha256_crypt
from werkzeug.http import parse_authorization_header
class AccountDomain(Domain):
resource_settings = AccountSettings
@staticmethod
def get_requested_database():
requested_database = request.path.split('/')[1]
if requested_database not in current_app.config['DATABASES']:
raise NotADatabase({'requested_path': requested_database})
else:
return requested_database
# noinspection PyNestedDecorators
@ClassProperty
@classmethod
def actual(cls) -> dict:
try:
# the values of g are inherited when doing inner requests so we need
# to always check the token in the headers (cls.actual_token)
# https://stackoverflow.com/questions/20036520/what-is-the-purpose-of-flasks-context-stacks
# http://stackoverflow.com/a/33382823/2710757
token = cls.actual_token
if not hasattr(g, '_actual_user') or g._actual_user['token'] != token:
from flask import request
try:
from flask import current_app as app
g._actual_user = AccountDomain.get_one({'token': token})
g._actual_user['role'] = Role(g._actual_user['role'])
except UserNotFound:
raise UserIsAnonymous("You need to be logged in.")
except TypeError:
raise NoUserForGivenToken()
return g._actual_user
except RuntimeError as e:
# Documentation access this variable
if str(e) != 'working outside of application context':
raise e
# noinspection PyNestedDecorators
@ClassProperty
@classmethod
def actual_token(cls) -> str:
"""Gets the **unhashed** token. Use `hash_token` to hash it."""
x = request.headers.environ['HTTP_AUTHORIZATION']
header = parse_authorization_header(x)
if header is None:
raise StandardError('The Authorization header is not well written: ' + x, 400)
return header['username']
@classmethod
def get_one(cls, id_or_filter: dict or ObjectId or str):
try:
return super().get_one(id_or_filter)
except ResourceNotFound:
raise UserNotFound()
@staticmethod
def import_key(key: str) -> str:
"""
Imports the key for the user
:param key: GPG Public Key
:raises CannotImportKey:
:return: Fingerprint of the imported key
"""
result = current_app.gpg.import_keys(key)
if result.count == 0:
raise CannotImportKey()
return result.fingerprint[0]
@staticmethod
def hash_token(token):
# Framework needs ':' at the end before send it to client
return base64.b64encode(str.encode(token + ':'))
@staticmethod
def encrypt_password(password: str) -> str:
return sha256_crypt.encrypt(password)
@staticmethod
def verify_password(password: str, original: str) -> bool:
return sha256_crypt.verify(password, original)
class UserIsAnonymous(WrongCredentials):
pass
class NoUserForGivenToken(WrongCredentials):
pass
class NotADatabase(BasicError):
status_code = 400
class CannotImportKey(StandardError):
status_code = 400
message = "We could not import the key. Make sure it is a valid GPG Public key."
class UserNotFound(ResourceNotFound):
pass
class WrongHeader(StandardError):
pass
| Python | 0.000004 | @@ -2331,24 +2331,41 @@
hash it.%22%22%22%0A
+ try:%0A
x =
@@ -2422,16 +2422,18 @@
-header =
+ return
par
@@ -2462,33 +2462,61 @@
r(x)
-%0A if header is Non
+%5B'username'%5D%0A except (KeyError, TypeError) as
e:%0A
@@ -2595,55 +2595,33 @@
tten
-: ' + x, 400)%0A return header%5B'username'%5D
+ or missing', 400) from e
%0A%0A
|
dba78b02daa5674769cccf56b867f5266d6ec0f1 | Add voluptuous to locative (#3254) | homeassistant/components/device_tracker/locative.py | homeassistant/components/device_tracker/locative.py | """
Support for the Locative platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.locative/
"""
import logging
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY, STATE_NOT_HOME
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['http']
def setup_scanner(hass, config, see):
"""Setup an endpoint for the Locative application."""
hass.wsgi.register_view(LocativeView(hass, see))
return True
class LocativeView(HomeAssistantView):
"""View to handle locative requests."""
url = "/api/locative"
name = "api:locative"
def __init__(self, hass, see):
"""Initialize Locative url endpoints."""
super().__init__(hass)
self.see = see
def get(self, request):
"""Locative message received as GET."""
return self.post(request)
def post(self, request):
"""Locative message received."""
# pylint: disable=too-many-return-statements
data = request.values
if 'latitude' not in data or 'longitude' not in data:
return ("Latitude and longitude not specified.",
HTTP_UNPROCESSABLE_ENTITY)
if 'device' not in data:
_LOGGER.error("Device id not specified.")
return ("Device id not specified.",
HTTP_UNPROCESSABLE_ENTITY)
if 'id' not in data:
_LOGGER.error("Location id not specified.")
return ("Location id not specified.",
HTTP_UNPROCESSABLE_ENTITY)
if 'trigger' not in data:
_LOGGER.error("Trigger is not specified.")
return ("Trigger is not specified.",
HTTP_UNPROCESSABLE_ENTITY)
device = data['device'].replace('-', '')
location_name = data['id'].lower()
direction = data['trigger']
if direction == 'enter':
self.see(dev_id=device, location_name=location_name)
return "Setting location to {}".format(location_name)
elif direction == 'exit':
current_state = self.hass.states.get(
"{}.{}".format(DOMAIN, device))
if current_state is None or current_state.state == location_name:
self.see(dev_id=device, location_name=STATE_NOT_HOME)
return "Setting location to not home"
else:
# Ignore the message if it is telling us to exit a zone that we
# aren't currently in. This occurs when a zone is entered
# before the previous zone was exited. The enter message will
# be sent first, then the exit message will be sent second.
return 'Ignoring exit from {} (already in {})'.format(
location_name, current_state)
elif direction == 'test':
# In the app, a test message can be sent. Just return something to
# the user to let them know that it works.
return "Received test message."
else:
_LOGGER.error("Received unidentified message from Locative: %s",
direction)
return ("Received unidentified message: {}".format(direction),
HTTP_UNPROCESSABLE_ENTITY)
| Python | 0.00002 | @@ -194,67 +194,8 @@
ng%0A%0A
-from homeassistant.components.device_tracker import DOMAIN%0A
from
@@ -323,16 +323,139 @@
tantView
+%0A# pylint: disable=unused-import%0Afrom homeassistant.components.device_tracker import ( # NOQA%0A DOMAIN, PLATFORM_SCHEMA)
%0A%0A_LOGGE
@@ -775,17 +775,17 @@
url =
-%22
+'
/api/loc
@@ -789,17 +789,17 @@
locative
-%22
+'
%0A nam
@@ -802,17 +802,17 @@
name =
-%22
+'
api:loca
@@ -815,17 +815,17 @@
locative
-%22
+'
%0A%0A de
@@ -1304,17 +1304,17 @@
return (
-%22
+'
Latitude
@@ -1334,33 +1334,33 @@
e not specified.
-%22
+'
,%0A
@@ -1452,17 +1452,17 @@
R.error(
-%22
+'
Device i
@@ -1469,33 +1469,33 @@
d not specified.
-%22
+'
)%0A re
@@ -1500,17 +1500,17 @@
return (
-%22
+'
Device i
@@ -1517,33 +1517,33 @@
d not specified.
-%22
+'
,%0A
@@ -1631,17 +1631,17 @@
R.error(
-%22
+'
Location
@@ -1650,33 +1650,33 @@
d not specified.
-%22
+'
)%0A re
@@ -1681,17 +1681,17 @@
return (
-%22
+'
Location
@@ -1700,33 +1700,33 @@
d not specified.
-%22
+'
,%0A
@@ -1819,17 +1819,17 @@
R.error(
-%22
+'
Trigger
@@ -1845,17 +1845,17 @@
ecified.
-%22
+'
)%0A
@@ -1868,17 +1868,17 @@
return (
-%22
+'
Trigger
@@ -1894,17 +1894,17 @@
ecified.
-%22
+'
,%0A
@@ -2183,33 +2183,33 @@
return
-%22
+'
Setting location
@@ -2214,17 +2214,17 @@
on to %7B%7D
-%22
+'
.format(
@@ -2343,15 +2343,15 @@
-%22
+'
%7B%7D.%7B%7D
-%22
+'
.for
@@ -2543,17 +2543,17 @@
return
-%22
+'
Setting
@@ -2572,17 +2572,17 @@
not home
-%22
+'
%0A
@@ -3209,17 +3209,17 @@
return
-%22
+'
Received
@@ -3232,17 +3232,17 @@
message.
-%22
+'
%0A%0A
@@ -3275,17 +3275,17 @@
R.error(
-%22
+'
Received
@@ -3323,17 +3323,17 @@
tive: %25s
-%22
+'
,%0A
@@ -3383,17 +3383,17 @@
return (
-%22
+'
Received
@@ -3417,17 +3417,17 @@
sage: %7B%7D
-%22
+'
.format(
|
06277ea30094ff6669537f2365b6ad9f5a19642b | Update laundry.py | fxcmminer_v1.1/fxcmminer/cleaning/laundry.py | fxcmminer_v1.1/fxcmminer/cleaning/laundry.py | from event import CleanedDataEvent
class DataCleaner(object):
"""
Basic data cleaning
"""
def __init__(self, events_queue):
"""
"""
self.events_queue = events_queue
def _remove_duplicates(self, data):
"""
Drop any duplicates in the Datetime Index
"""
return data.reset_index().drop_duplicates(
subset='date', keep='last').set_index('date')
def _remove_not_a_number(self, data):
"""
Drop any rows that contain NaN values.
"""
return data.dropna()
def _remove_incorrect_values(
self, data, ao='askopen',ah='askhigh', al='asklow',
ac='askclose', bo='bidopen',bh='bidhigh', bl='bidlow',
bc='bidclose', v='volume'
):
"""
Removes errors from the open high low close values.
"""
data = data.loc[data[ac] <= data[ah]]
data = data.loc[data[ac] >= data[al]]
data = data.loc[data[ao] <= data[ah]]
data = data.loc[data[ao] >= data[al]]
data = data.loc[data[ah] >= data[al]]
data = data.loc[data[bc] <= data[bh]]
data = data.loc[data[bc] >= data[bl]]
data = data.loc[data[bo] <= data[bh]]
data = data.loc[data[bo] >= data[bl]]
data = data.loc[data[bh] >= data[bl]]
data = data.loc[data[v] >= 0]
return data
def clean_data(self, event):
data = self._remove_not_a_number(event.data)
data = self._remove_incorrect_values(data)
data = self._remove_duplicates(data)
self.events_queue.put(CleanedDataEvent(
data, event.instrument, event.time_frame))
| Python | 0.000001 | @@ -72,27 +72,357 @@
-Basic data cleaning
+The DataCleaner class is the process of correcting%0A (or removing) corrupt or inaccurate records from a record set%0A and refers to identifying incomplete, incorrect, inaccurate%0A or irrelevant parts of the data and then replacing,%0A modifying, or deleting the dirty or coarse data.%0A Most of the above is not implemented in the code below.
%0A
@@ -475,24 +475,36 @@
%22%22%22
-%0A
+ Initialize varables
%22%22%22%0A
@@ -589,32 +589,24 @@
%0A %22%22%22
-%0A
Drop any du
@@ -635,24 +635,16 @@
me Index
-%0A
%22%22%22%0A
@@ -803,24 +803,16 @@
%22%22%22
-%0A
Drop an
@@ -837,33 +837,24 @@
n NaN values
-.%0A
%22%22%22%0A
@@ -1085,24 +1085,16 @@
%22%22%22
-%0A
Removes
@@ -1140,17 +1140,8 @@
lues
-.%0A
%22%22%22
@@ -1660,16 +1660,16 @@
n data%0A%0A
-
def
@@ -1689,24 +1689,82 @@
lf, event):%0A
+ %22%22%22 Encapsulates the above cleaning processes %22%22%22%0A
data
|
4844ba065d86fdce3f01b7b191ecc6a4ef43661e | Add autoclass directives to Visualization/__init__.py. This will enable Visualization module methods to appear in function reference. | OpenPNM/Visualization/__init__.py | OpenPNM/Visualization/__init__.py | r"""
*******************************************************************************
:mod:`OpenPNM.Visualization`: Network Visualization
*******************************************************************************
.. module:: OpenPNM.Visualization
Contents
--------
tbd
.. note::
n/a
Import
------
>>> import OpenPNM as PNM
>>> tmp=PNM.Visualization ....
Submodules
----------
::
None --- No subpackages at the moment
"""
from __GenericVisualization__ import GenericVisualization
from __VTK__ import VTK
| Python | 0.000034 | @@ -292,174 +292,193 @@
a%0A %0A
-Import%0A------%0A%3E%3E%3E import OpenPNM as PNM%0A%3E%3E%3E tmp=PNM.Visualization ....%0A%0A%0ASubmodules%0A----------%0A::%0A%0A None --- No subpackages at the moment%0A%0A
+%0AClasses%0A-------%0A%0A.. autoclass:: GenericVisualization%0A :members:%0A :undoc-members:%0A :show-inheritance:%0A %0A.. autoclass:: VTK%0A :members:%0A :undoc-members:%0A :show-inheritance:
%0A
|
6e6ccc8566fe90323d900fd0ebd38f45ad4d0b63 | Update TipCalculator.py | PracticePrograms/TipCalculator.py | PracticePrograms/TipCalculator.py | '''
Author : DORIAN JAVA BROWN
Version : N/A
Copyright : All Rights Reserve; You may use, distribute and modify this code.
Description : This program provides the user with options on how much tip the customer should leave the waiter/waitress
'''
import os
total = 21.49
def cls():
os.system('cls' if os.name=='nt' else 'clear')
# menu
print('\n\n\t\t JUICEY BURGER\n\n')
print('')
print('1 Juicey Burger $ 18.99')
print('1 Orange Drink $ 1.00')
print('-------------------------------------------')
print('')
print('Sub Total: $ 19.99')
print('Local Tax: $ 1.50')
print('Bill Total: $ 21.49')
print('\n\n')
answer = raw_input('Correct ? ')
if answer == 'YES' or answer == 'Yes' or answer == 'yes' :
cls()
# tip suggestion list
print('\n\n\t Tip Suggestions')
print('----------------------------------')
print('A) %%20 $ %0.3f' %((total * .20)))
print('B) %%20 $ %0.3f' %((total * .20)))
print('C) %%20 $ %0.3f' %((total * .20)))
print('D) %%20 $ %0.3f' %((total * .20)))
elif answer == 'NO' or answer == 'No' or answer == 'no' :
print ('\n\n\t\t please wait one moment for assistance...\n\n')
else:
print('\n\n\t\t error:. invaild value \n\n')
#https://www.youtube.com/watch?annotation_id=annotation_3770292585&feature=iv&src_vid=bguKhMnvmb8&v=LtGEp9c6Z-U
| Python | 0 | @@ -920,26 +920,26 @@
rint('B) %25%25
-20
+15
$
@@ -950,34 +950,34 @@
3f' %25((total * .
-20
+15
)))%0A print('C)
@@ -975,25 +975,25 @@
rint('C) %25%25
-2
+1
0
@@ -1005,33 +1005,33 @@
3f' %25((total * .
-2
+1
0)))%0A print('D)
@@ -1032,20 +1032,20 @@
nt('D)
+
%25%25
-20
+5
@@ -1064,26 +1064,26 @@
%25((total * .
-2
0
+5
)))%0A %0A %0A
|
953ce15f2a3b2ffdc0e27d95afbe4f8cda2cdbfd | set default behavior to add datacenters | SoftLayer/CLI/image/datacenter.py | SoftLayer/CLI/image/datacenter.py | """Edit details of an image."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier')
@click.option('--add/--remove',
default=False,
help="To add or remove Datacenter")
@click.argument('locations', nargs=-1, required=True)
@environment.pass_env
def cli(env, identifier, add, locations):
"""Add/Remove datacenter of an image."""
image_mgr = SoftLayer.ImageManager(env.client)
image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
if add:
result = image_mgr.add_locations(image_id, locations)
else:
result = image_mgr.remove_locations(image_id, locations)
env.fout(result)
| Python | 0 | @@ -256,30 +256,16 @@
remove',
-%0A
default
@@ -269,12 +269,11 @@
ult=
-Fals
+Tru
e,%0A
|
704b2fb59857d6a59bf2223830236a4deb31d3e6 | Update test code | about_code_tool/tests/test_gen.py | about_code_tool/tests/test_gen.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) 2014-2016 nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import print_function
import unittest
import posixpath
from about_code_tool import Error
from about_code_tool import ERROR
from about_code_tool import INFO
from about_code_tool import CRITICAL
from about_code_tool import gen
from about_code_tool import model
from about_code_tool.tests import to_posix
from about_code_tool.tests import get_test_loc
from about_code_tool.tests import get_temp_dir
from unittest.case import expectedFailure
from collections import OrderedDict
class GenTest(unittest.TestCase):
def test_check_duplicated_columns(self):
test_file = get_test_loc('gen/dup_keys.csv')
expected = [Error(ERROR, u'Duplicated column name(s): copyright with copyright')]
result = gen.check_duplicated_columns(test_file)
assert expected == result
def test_check_duplicated_columns_handles_lower_upper_case(self):
test_file = get_test_loc('gen/dup_keys_with_diff_case.csv')
expected = [Error(ERROR, u'Duplicated column name(s): copyright with Copyright')]
result = gen.check_duplicated_columns(test_file)
assert expected == result
def test_load_inventory(self):
self.maxDiff = None
location = get_test_loc('gen/inv.csv')
base_dir = get_test_loc('inv')
errors, abouts = gen.load_inventory(location, base_dir)
expected_errors = [
Error(INFO, u'Field custom1 is a custom field'),
Error(CRITICAL, u'Field about_resource: Path . not found')]
assert expected_errors == errors
expected = [u'about_resource: .\n'
u'name: AboutCode\n'
u'version: 0.11.0\n'
u'custom1: |\n'
u' multi\n'
u' line\n']
result = [a.dumps(with_absent=False, with_empty=False)
for a in abouts]
assert expected == result
def test_generation_dir_endswith_space(self):
location = get_test_loc('inventory/complex/about_file_path_dir_endswith_space.csv')
gen_dir = get_temp_dir()
errors, abouts = gen.generate(location,
base_dir=gen_dir,
with_empty=False, with_absent=False)
expected_errors_msg = 'contains directory name ends with spaces which is not allowed. Generation skipped.'
assert (len(errors) == 1, 'Should return 1 error.')
assert expected_errors_msg in errors[0].message
def test_generation_with_no_about_resource(self):
location = get_test_loc('gen/inv2.csv')
gen_dir = get_temp_dir()
errors, abouts = gen.generate(location,
base_dir=gen_dir,
with_empty=False, with_absent=False)
expected_dict = OrderedDict()
expected_dict[u'.'] = None
assert abouts[0].about_resource.value == expected_dict
assert len(errors) == 0
def test_generation_with_no_about_resource_reference(self):
location = get_test_loc('gen/inv3.csv')
gen_dir = get_temp_dir()
errors, abouts = gen.generate(location,
base_dir=gen_dir,
with_empty=False, with_absent=False)
expected_dict = OrderedDict()
expected_dict[u'test.tar.gz'] = None
assert abouts[0].about_resource.value == expected_dict
assert len(errors) == 1
msg = u'The reference file'
assert msg in errors[0].message
@expectedFailure
def test_generate(self):
location = get_test_loc('gen/inv.csv')
gen_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir=gen_dir,
with_empty=False, with_absent=False)
expected_errors = [Error(INFO, u'Field custom1 is a custom field')]
assert expected_errors == errors
gen_loc = posixpath.join(to_posix(gen_dir), 'inv', 'this.ABOUT')
about = model.About(location=gen_loc)
on_disk_result = about.dumps(with_absent=False, with_empty=False)
in_mem_result = [a.dumps(with_absent=False, with_empty=False)
for a in abouts][0]
expected = (u'about_resource: .\n'
u'name: AboutCode\n'
u'version: 0.11.0\n'
u'custom1: |\n'
u' multi\n'
u' line\n')
assert expected == on_disk_result
assert expected == in_mem_result
@expectedFailure
def test_generate_complex_inventory(self):
location = get_test_loc('inventory/complex/about/expected.csv')
gen_dir = get_temp_dir()
errors, abouts = gen.generate(location,
base_dir=gen_dir,
with_empty=False, with_absent=False)
expected_errors = [Error(INFO, u'Field custom1 is a custom field')]
assert expected_errors == errors
gen_loc = posixpath.join(to_posix(gen_dir), 'inv', 'this.ABOUT')
about = model.About(location=gen_loc)
on_disk_result = about.dumps(with_absent=False, with_empty=False)
in_mem_result = [a.dumps(with_absent=False, with_empty=False)
for a in abouts][0]
expected = (u'about_resource: .\n'
u'name: AboutCode\n'
u'version: 0.11.0\n'
u'custom1: multi\n'
u' line\n')
assert expected == on_disk_result
assert expected == in_mem_result
| Python | 0.000001 | @@ -1534,32 +1534,70 @@
t with copyright
+%5CnPlease correct the input and re-run.
')%5D%0A resu
@@ -1900,16 +1900,54 @@
opyright
+%5CnPlease correct the input and re-run.
')%5D%0A
|
1c31d23dd95fb4ca8a5daaf37aaa7d75472f1d24 | fix run time error: list indices must be integers not str (#611) | src/harness/reference_models/pre_iap_filtering/pre_iap_filtering.py | src/harness/reference_models/pre_iap_filtering/pre_iap_filtering.py | # Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
==================================================================================
This is the main Pre-IAP reference model which invokes the sub pre-IAP reference
models to filter out grants and CBSDs before IAP model is invoked.
==================================================================================
"""
from reference_models.pre_iap_filtering import fss_purge
from reference_models.pre_iap_filtering import inter_sas_duplicate_grant
from reference_models.pre_iap_filtering import zone_purge
from reference_models.pre_iap_filtering import pre_iap_util
def preIapReferenceModel(protected_entities, sas_uut_fad, sas_test_harness_fads):
""" The main function that invokes all pre-IAP filtering models.
The grants/CBSDs to be purged are removed from the input parameters.
Args:
protected_entities: A dictionary containing the list of protected entities. The key
is a protected enity type and the value is a list of corresponding protected
entity records. The format is {'entityName':[record1, record2]}.
sas_uut_fad: A FullActivityDump object containing the FAD records of SAS UUT.
sas_test_harness_fads: A list of FullActivityDump objects containing the FAD records
from SAS test harnesses.
"""
# Invoke Inter SAS duplicate grant purge list reference model
inter_sas_duplicate_grant.interSasDuplicateGrantPurgeReferenceModel(sas_uut_fad,
sas_test_harness_fads)
# Invoke PPA, EXZ, GWPZ, and FSS+GWBL purge list reference models
list_of_fss_neighboring_gwbl = pre_iap_util.getFssNeighboringGwbl(
protected_entities['gwblRecords'],
protected_entities['fssRecords'])
zone_purge.zonePurgeReferenceModel(sas_uut_fad,
sas_test_harness_fads,
protected_entities['ppaRecords'],
protected_entities['palRecords'],
protected_entities['gwpzRecords'],
list_of_fss_neighboring_gwbl)
# Invoke FSS purge list reference model
if 'fssRecords' in protected_entities:
fss_purge.fssPurgeReferenceModel(sas_uut_fad, sas_test_harness_fads,
protected_entities['fssRecords'])
| Python | 0.000014 | @@ -2194,16 +2194,266 @@
models%0A
+ # Initialize expected keys in protected_entities to empty array if type does not exist%0A for key in %5B'gwblRecords', 'fssRecords', 'ppaRecords', 'palRecords', 'gwpzRecords'%5D:%0A if key not in protected_entities:%0A protected_entities%5Bkey%5D = %5B%5D%0A%0A
list_o
|
cdd28cba2c6299e18b5d5221f8d10b8649c1faed | Use numpy | tests/chainer_tests/functions_tests/array_tests/test_expand_dims.py | tests/chainer_tests/functions_tests/array_tests/test_expand_dims.py | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(
{'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': 0},
{'in_shape': (3, 2), 'out_shape': (3, 1, 2), 'axis': 1},
{'in_shape': (3, 2), 'out_shape': (3, 2, 1), 'axis': 2},
{'in_shape': (3, 2), 'out_shape': (3, 2, 1), 'axis': -1},
{'in_shape': (3, 2), 'out_shape': (3, 1, 2), 'axis': -2},
{'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': -3},
)
class TestExpandDims(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.in_shape) \
.astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, self.out_shape) \
.astype(numpy.float32)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.expand_dims(x, self.axis)
self.assertEqual(y.data.shape, self.out_shape)
y_expect = numpy.expand_dims(cuda.to_cpu(x_data), self.axis)
cuda.cupy.testing.assert_array_equal(y.data, y_expect)
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = functions.expand_dims(x, self.axis)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((x_data,))
gx, = gradient_check.numerical_grad(f, (x_data,), (y_grad,))
gradient_check.assert_allclose(cuda.to_cpu(x.grad),
cuda.to_cpu(gx))
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward(self):
self.check_forward(cuda.to_gpu(self.x))
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| Python | 0.00006 | @@ -1140,15 +1140,11 @@
-cuda.cu
+num
py.t
@@ -1169,22 +1169,35 @@
y_equal(
+cuda.to_cpu(
y.data
+)
, y_expe
|
9323df973584b86355e19229f4beb8ebc7981693 | remove `gamma` and `Decoonvolution`s | tests/chainer_tests/link_hooks_tests/test_weight_standardization.py | tests/chainer_tests/link_hooks_tests/test_weight_standardization.py | import unittest
import numpy
import pytest
import chainer
from chainer.backends import cuda
from chainer.link_hooks.weight_standardization import WeightStandardization
import chainer.links as L
from chainer import testing
from chainer.testing import attr
class TestExceptions(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(numpy.ones((10, 5), dtype=numpy.float32))
self.layer = L.Linear(5, 20)
def test_wrong_weight_name(self):
wrong_Weight_name = 'w'
hook = WeightStandardization(weight_name=wrong_Weight_name)
with pytest.raises(ValueError):
self.layer.add_hook(hook)
def test_raises(self):
with pytest.raises(NotImplementedError):
with WeightStandardization():
self.layer(self.x)
def test_invalid_shaped_weight(self):
with pytest.raises(ValueError):
L.Linear(10, 0).add_hook(WeightStandardization())
class BaseTest(object):
def test_add_ws_hook(self):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
if self.lazy_init:
with chainer.using_config('train', False):
layer(self.x)
if not self.use_gamma:
assert not hasattr(layer, 'gamma')
else: # Use gamma parameter
assert hasattr(layer, 'gamma')
assert layer.gamma.ndim == 0 and layer.gamma.size == 1
def _init_layer(self):
hook = WeightStandardization()
layer = self.layer
layer.add_hook(hook)
if self.lazy_init:
# Initialize weight and bias.
with chainer.using_config('train', False):
layer(self.x)
return layer, hook
def check_weight_is_parameter(self, gpu):
layer, hook = self._init_layer()
if gpu:
layer = layer.to_gpu()
source_weight = getattr(layer, hook.weight_name)
x = cuda.to_gpu(self.x) if gpu else self.x
layer(x)
assert getattr(layer, hook.weight_name) is source_weight
def test_weight_is_parameter_cpu(self):
if not self.lazy_init:
self.check_weight_is_parameter(False)
@attr.gpu
def test_weight_is_parameter_gpu(self):
if not self.lazy_init:
self.check_weight_is_parameter(True)
def check_deleted(self, gpu):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
if gpu:
layer = layer.to_gpu()
x = cuda.to_gpu(self.x) if gpu else self.x
y1 = layer(x).array
with chainer.using_config('train', False):
y2 = layer(x).array
layer.delete_hook(hook.name)
y3 = layer(x).array
if gpu:
y1, y2, y3 = cuda.to_cpu(y1), cuda.to_cpu(y2), cuda.to_cpu(y3)
assert not numpy.array_equal(y1, y3)
assert not numpy.array_equal(y2, y3)
def test_deleted_cpu(self):
self.check_deleted(False)
@attr.gpu
def test_deleted_gpu(self):
self.check_deleted(True)
class TestEmbedID(unittest.TestCase, BaseTest):
def setUp(self):
self.lazy_init = False # For convenience.
self.bs, self.in_size, self.out_size = 5, 10, 20
self.x = numpy.arange(self.in_size, dtype=numpy.int32)
self.layer = L.EmbedID(self.in_size, self.out_size)
self.hook = WeightStandardization()
def test_add_ws_hook(self):
hook = WeightStandardization()
layer = self.layer
layer.add_hook(hook)
if self.lazy_init:
with chainer.using_config('train', False):
layer(self.x)
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
}))
class TestLinear(unittest.TestCase, BaseTest):
def setUp(self):
self.bs, self.in_size, self.out_size = 10, 20, 30
self.x = numpy.random.normal(
size=(self.bs, self.in_size)).astype(numpy.float32)
self.layer = L.Linear(self.out_size) # Lazy initialization
in_size = None if self.lazy_init else self.in_size
self.layer = L.Linear(in_size, self.out_size)
self.hook = WeightStandardization()
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
'link': [L.Convolution1D, L.Deconvolution1D],
}))
class TestConvolution1D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4)).astype(numpy.float32)
self.hook = WeightStandardization()
self.out_size = self.out_channels # For compatibility
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
'link': [L.Convolution2D, L.Deconvolution2D],
}))
class TestConvolution2D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4, 4)).astype(numpy.float32)
self.hook = WeightStandardization()
self.out_size = self.out_channels # For compatibility
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
'link': [L.Convolution3D, L.Deconvolution3D],
}))
class TestConvolution3D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4, 4, 4)).astype(numpy.float32)
self.hook = WeightStandardization()
self.out_size = self.out_channels # For compatibility
testing.run_module(__name__, __file__)
| Python | 0.000151 | @@ -1191,233 +1191,8 @@
f.x)
-%0A if not self.use_gamma:%0A assert not hasattr(layer, 'gamma')%0A else: # Use gamma parameter%0A assert hasattr(layer, 'gamma')%0A assert layer.gamma.ndim == 0 and layer.gamma.size == 1
%0A%0A
@@ -4028,30 +4028,11 @@
on1D
-, L.Deconvolution1D
%5D,%0A
+
%7D))%0A
@@ -4698,27 +4698,8 @@
on2D
-, L.Deconvolution2D
%5D,%0A%7D
@@ -5331,32 +5331,32 @@
%5BTrue, False%5D,%0A
+
'link': %5BL.C
@@ -5371,27 +5371,8 @@
on3D
-, L.Deconvolution3D
%5D,%0A%7D
|
b2e0812f6946f88cef2cdffb0be603b4b769033a | rename run_test to execute in streams simple benchmark (#4941) | tests/kafkatest/benchmarks/streams/streams_simple_benchmark_test.py | tests/kafkatest/benchmarks/streams/streams_simple_benchmark_test.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.tests.test import Test
from ducktape.mark.resource import cluster
from ducktape.mark import parametrize, matrix
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.performance.streams_performance import StreamsSimpleBenchmarkService
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.version import DEV_BRANCH
STREAMS_SIMPLE_TESTS = ["streamprocess", "streamprocesswithsink", "streamprocesswithstatestore", "streamprocesswithwindowstore"]
STREAMS_COUNT_TESTS = ["streamcount", "streamcountwindowed"]
STREAMS_JOIN_TESTS = ["streamtablejoin", "streamstreamjoin", "tabletablejoin"]
NON_STREAMS_TESTS = ["consume", "consumeproduce"]
ALL_TEST = "all"
STREAMS_SIMPLE_TEST = "streams-simple"
STREAMS_COUNT_TEST = "streams-count"
STREAMS_JOIN_TEST = "streams-join"
class StreamsSimpleBenchmarkTest(Test):
"""
Simple benchmark of Kafka Streams.
"""
def __init__(self, test_context):
super(StreamsSimpleBenchmarkTest, self).__init__(test_context)
# these values could be updated in ad-hoc benchmarks
self.key_skew = 0
self.value_size = 1024
self.num_records = 10000000L
self.num_threads = 1
self.replication = 1
@cluster(num_nodes=12)
@matrix(test=["consume", "consumeproduce", "streams-simple", "streams-count", "streams-join"], scale=[1])
def test_simple_benchmark(self, test, scale):
"""
Run simple Kafka Streams benchmark
"""
self.driver = [None] * (scale + 1)
self.final = {}
#############
# SETUP PHASE
#############
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
self.kafka = KafkaService(self.test_context, num_nodes=scale, zk=self.zk, version=DEV_BRANCH, topics={
'simpleBenchmarkSourceTopic1' : { 'partitions': scale, 'replication-factor': self.replication },
'simpleBenchmarkSourceTopic2' : { 'partitions': scale, 'replication-factor': self.replication },
'simpleBenchmarkSinkTopic' : { 'partitions': scale, 'replication-factor': self.replication },
'yahooCampaigns' : { 'partitions': 20, 'replication-factor': self.replication },
'yahooEvents' : { 'partitions': 20, 'replication-factor': self.replication }
})
self.kafka.log_level = "INFO"
self.kafka.start()
load_test = ""
if test == ALL_TEST:
load_test = "load-two"
if test in STREAMS_JOIN_TESTS or test == STREAMS_JOIN_TEST:
load_test = "load-two"
if test in STREAMS_COUNT_TESTS or test == STREAMS_COUNT_TEST:
load_test = "load-one"
if test in STREAMS_SIMPLE_TESTS or test == STREAMS_SIMPLE_TEST:
load_test = "load-one"
if test in NON_STREAMS_TESTS:
load_test = "load-one"
################
# LOAD PHASE
################
self.load_driver = StreamsSimpleBenchmarkService(self.test_context,
self.kafka,
load_test,
self.num_threads,
self.num_records,
self.key_skew,
self.value_size)
self.load_driver.start()
self.load_driver.wait(3600) # wait at most 30 minutes
self.load_driver.stop()
if test == ALL_TEST:
for single_test in STREAMS_SIMPLE_TESTS + STREAMS_COUNT_TESTS + STREAMS_JOIN_TESTS:
self.run_test(single_test, scale)
elif test == STREAMS_SIMPLE_TEST:
for single_test in STREAMS_SIMPLE_TESTS:
self.run_test(single_test, scale)
elif test == STREAMS_COUNT_TEST:
for single_test in STREAMS_COUNT_TESTS:
self.run_test(single_test, scale)
elif test == STREAMS_JOIN_TEST:
for single_test in STREAMS_JOIN_TESTS:
self.run_test(single_test, scale)
else:
self.run_test(test, scale)
return self.final
def run_test(self, test, scale):
################
# RUN PHASE
################
for num in range(0, scale):
self.driver[num] = StreamsSimpleBenchmarkService(self.test_context,
self.kafka,
test,
self.num_threads,
self.num_records,
self.key_skew,
self.value_size)
self.driver[num].start()
#######################
# STOP + COLLECT PHASE
#######################
data = [None] * (scale)
for num in range(0, scale):
self.driver[num].wait()
self.driver[num].stop()
self.driver[num].node.account.ssh("grep Performance %s" % self.driver[num].STDOUT_FILE, allow_fail=False)
data[num] = self.driver[num].collect_data(self.driver[num].node, "")
self.driver[num].read_jmx_output_all_nodes()
for num in range(0, scale):
for key in data[num]:
self.final[key + "-" + str(num)] = data[num][key]
for key in sorted(self.driver[num].jmx_stats[0]):
self.logger.info("%s: %s" % (key, self.driver[num].jmx_stats[0][key]))
self.final[test + "-jmx-avg-" + str(num)] = self.driver[num].average_jmx_value
self.final[test + "-jmx-max-" + str(num)] = self.driver[num].maximum_jmx_value
| Python | 0 | @@ -4568,32 +4568,31 @@
self.
-run_test
+execute
(single_test
@@ -4712,32 +4712,31 @@
self.
-run_test
+execute
(single_test
@@ -4854,32 +4854,31 @@
self.
-run_test
+execute
(single_test
@@ -4998,24 +4998,23 @@
self.
-run_test
+execute
(single_
@@ -5057,24 +5057,23 @@
self.
-run_test
+execute
(test, s
@@ -5118,16 +5118,15 @@
def
-run_test
+execute
(sel
|
d1fa13bdf3ca7d1c4eabdaace5758d6b031ef909 | Set up the download url, which I forgot about. | exampleSettings/urls.py | exampleSettings/urls.py | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'post.views.list', name='home'),
(r'^post/(.*)/$', 'post.views.post'),
# Examples:
# url(r'^$', 'exampleSettings.views.home', name='home'),
# url(r'^exampleSettings/', include('exampleSettings.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
(r'^edit/(.*)/$', 'post.views.edit'),
(r'^create/$', 'post.views.create'),
(r'^tag/(.*)/$', 'post.views.tag'),
(r'^tagcloud/$', 'post.views.tagcloud' ),
(r'^editorg/(.*)/$', 'organization.views.orgedit'),
#Search urls
(r'^search/', include('haystack.urls')),
#captcha urls
url(r'^captcha/', include('captcha.urls')),
(r'^register/$', 'userProfile.views.register'),
(r'^login/$', 'userProfile.views.login_user'),
(r'^logout/$', 'userProfile.views.logout_user'),
(r'^editProfile/$', 'userProfile.views.edit'),
#preview pages for the STL files.
(r'^thumbs/jsc3d/(.*)', 'thumbnailer.views.stlthumb'),
(r'^preview/jsc3d/(.*)', 'thumbnailer.views.stlview'),
url(r'', include('multiuploader.urls')),
(r'^taggit_autosuggest/', include('taggit_autosuggest.urls')),
#user profile pages
url(r'^userProfile/', include('userProfile.urls')),
(r'^editUser/', 'userProfile.views.edit'),
)+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| Python | 0 | @@ -912,65 +912,8 @@
),%0A%0A
- (r'%5Eeditorg/(.*)/$', 'organization.views.orgedit'),%0A%0A
@@ -1641,16 +1641,75 @@
edit'),%0A
+ (r'%5Edownload/(.*)/$', 'filemanager.views.download'),%0A%0A%0A
%0A)+ stat
|
5b1eefb315cd9094de8c8827e0f3a8c0eeefe95a | delete view: make sure item is closed before it is removed from storage | bepasty/views/delete.py | bepasty/views/delete.py | # Copyright: 2014 Dennis Schmalacker <[email protected]>
# License: BSD 2-clause, see LICENSE for details.
import errno
from flask import current_app, redirect, url_for, render_template, abort
from flask.views import MethodView
from werkzeug.exceptions import NotFound
from . import blueprint
from ..utils.permissions import *
class DeleteView(MethodView):
def get(self, name):
if not may(DELETE):
abort(403)
try:
item = current_app.storage.open(name)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return render_template('file_not_found.html'), 404
raise
if item.meta.get('locked'):
error = 'File locked.'
elif not item.meta.get('complete'):
error = 'Upload incomplete. Try again later.'
else:
error = None
if error:
try:
return render_template('display_error.html', name=name, item=item, error=error), 409
finally:
item.close()
try:
item = current_app.storage.remove(name)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return render_template('file_not_found.html'), 404
return redirect(url_for('bepasty.display', name=name))
blueprint.add_url_rule('/<itemname:name>/+delete', view_func=DeleteView.as_view('delete'))
| Python | 0 | @@ -447,38 +447,36 @@
ry:%0A
+w
it
-em =
+h
current_app.sto
@@ -486,24 +486,33 @@
e.open(name)
+ as item:
%0A exc
@@ -512,166 +512,8 @@
-except (OSError, IOError) as e:%0A if e.errno == errno.ENOENT:%0A return render_template('file_not_found.html'), 404%0A raise%0A%0A
@@ -548,32 +548,40 @@
'):%0A
+
+
error = 'File lo
@@ -587,16 +587,24 @@
ocked.'%0A
+
@@ -635,24 +635,32 @@
complete'):%0A
+
@@ -717,14 +717,30 @@
-else:%0A
+ else:%0A
@@ -764,24 +764,32 @@
one%0A
+
+
if error:%0A
@@ -791,28 +791,16 @@
r:%0A
- try:%0A
@@ -798,33 +798,32 @@
-
return render_te
@@ -891,16 +891,17 @@
r), 409%0A
+%0A
@@ -907,79 +907,8 @@
- finally:%0A item.close()%0A%0A try:%0A item =
cur
@@ -937,16 +937,17 @@
e(name)%0A
+%0A
@@ -1084,16 +1084,34 @@
l'), 404
+%0A raise
%0A%0A
@@ -1168,16 +1168,17 @@
name))%0A%0A
+%0A
blueprin
|
1596b1b782b96c89f0a06710dd3e75b65defc3d8 | Fix validating no parms for led_matrix methods | cnavsense/services/led_matrix.py | cnavsense/services/led_matrix.py | import os
from zmqservices import services
from cnavconstants.servers import (
LOCAL_LED_MATRIX_ADDRESS, LED_MATRIX_PORT_ADDRESS
)
from cnavsense import settings
from cnavsense.utils import sentry, logger
class LedMatrix(services.JsonrpcServerResource):
def __init__(self, *args, **kwargs):
super(LedMatrix, self).__init__(*args, **kwargs)
self.driver = kwargs.pop('driver', settings.SENSE_HAT_DRIVER)
self.endpoints = {
'set_rotation': self.validate_set_rotation_params,
'set_pixels': self.validate_set_pixels_params,
'set_pixel': self.validate_set_pixel_params,
'get_pixel': self.validate_get_pixel_params,
'load_image': self.validate_load_image_params,
'flip_horizontally': self.validate_no_params,
'flip_vertically': self.validate_no_params,
'clear': self.validate_no_params,
'set_colour': self.validate_set_colour_params,
'show_message': self.validate_text_and_back_colour_params,
'show_letter': self.validate_text_and_back_colour_params,
'set_low_light': (
lambda x: True if (x is True or x is False) else False
),
}
def run(self, *args, **kwargs):
with sentry():
super(LedMatrix, self).run(*args, **kwargs)
@staticmethod
def validate_no_params(params=None):
return params is None
@staticmethod
def validate_set_rotation_params(params):
return params.get('value') in (0, 90, 180, 270)
def set_rotation(self, value):
self.driver.set_rotation(value)
def flip_horizontally(self):
self.driver.flip_h()
def flip_vertically(self):
self.driver.flip_v()
@staticmethod
def pixel_is_valid(pixel):
return pixel and (len(pixel) == 3 and (
all((
(isinstance(colour, int) and colour in range(0, 255))
for colour in pixel
))
))
@staticmethod
def validate_set_pixels_params(params):
pixels = params.get('pixels')
return (len(pixels) == 64) and [
pixel for pixel in pixels if LedMatrix.pixel_is_valid(pixel)
]
def set_pixels(self, pixels):
if self.pixels_are_valid(pixels):
self.driver.set_pixels(pixels)
else:
logger.error('Invalid pixels provided: "{}"'.format(pixels))
def get_pixels(self):
return self.driver.get_pixels()
@staticmethod
def validate_set_pixel_params(params):
valid_range = range(0, 7)
xy = params.get('x') in valid_range and params.get('y') in valid_range
return xy and LedMatrix.pixel_is_valid(params.get('pixel'))
def set_pixel(self, x, y, pixel):
return self.driver.set_pixel(x, y, pixel)
@staticmethod
def validate_get_pixel_params(params):
valid_range = range(0, 7)
return (
params.get('x') in valid_range and params.get('y') in valid_range
)
def get_pixel(self, x, y):
return self.driver.get_pixel(x, y)
@staticmethod
def valid_path(path):
return os.path.exists(path)
@staticmethod
def validate_load_image_params(params):
return os.path.exists(params.get('file_path'))
def load_image(self, file_path):
return self.driver.load_image(file_path)
def clear(self):
self.driver.clear()
@staticmethod
def validate_set_colour_params(params):
return LedMatrix.pixel_is_valid(params.get('colour'))
def set_colour(self, colour):
self.driver.clear(colour)
@staticmethod
def validate_text_and_back_colour_params(params):
return all((
LedMatrix.pixel_is_valid(p) for p in (
params.get('text_colour'), params.get('back_colour')
)
))
def show_message(
self, text, scroll_speed=0.1,
text_colour=(255, 255, 255), back_colour=(0, 0, 0)):
self.driver.show_message(
text,
scroll_speed=scroll_speed,
text_colour=text_colour,
back_colour=back_colour,
)
def show_letter(
self, letter, text_colour=(255, 255, 255), back_colour=(0, 0, 0)):
self.driver.show_letter(
letter, text_colour=text_colour, back_colour=back_colour
)
logger.error('Invalid pixels provided: "{}, {}"'.format(
text_colour, back_colour
))
def set_low_light(self, on=True):
self.driver.low_light = on
class Service(services.JsonrpcServer):
name = 'led_matrix'
resource = LedMatrix
address = LOCAL_LED_MATRIX_ADDRESS
port = LED_MATRIX_PORT_ADDRESS
def start():
return Service().start()
if __name__ == '__main__':
start()
| Python | 0.000002 | @@ -1430,22 +1430,18 @@
urn
+not
params
- is None
%0A%0A
|
aa5259efac8f7fbe8e2afd263198feaaa45fc4c3 | Change test for running on Tingbot | tingbot/platform_specific/__init__.py | tingbot/platform_specific/__init__.py | import platform
def is_tingbot():
"""return True if running as a tingbot. We can update this function to be more smart in future"""
return platform.machine().startswith('armv71')
if platform.system() == 'Darwin':
from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
from pi import fixup_env, create_main_surface, register_button_callback
else:
from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
| Python | 0 | @@ -8,17 +8,21 @@
platform
+, os
%0A
-
%0Adef is_
@@ -39,17 +39,22 @@
%0A %22%22%22
-r
+%0A R
eturn Tr
@@ -84,117 +84,149 @@
bot.
- We can update this function to be more smart in future%22%22%22%0A return platform.machine().startswith('armv71')
+%0A %22%22%22%0A # TB_RUN_ON_LCD is an environment variable set by tbprocessd when running tingbot apps.%0A return 'TB_RUN_ON_LCD' in os.environ
%0A%0Aif
|
998fe3252403553efb3bf022510b12c0a219f355 | Test coverage | playkit/api.py | playkit/api.py | import requests
from bs4 import BeautifulSoup
def search(keyword="",category="apps",country="us",pricing="all",rating="all",format="dict",proxies=None):
requests.packages.urllib3.disable_warnings()
priceMap = {"all":0,"free":1,"paid":2,"All":0,"Free":1,"Paid":2}
ratingsMap = {"all":0,"4+":1,"All":0}
response = {"status":"OK","error":None,"results":[]}
try:
payload = {'q': keyword, 'c': category,'gl': country,"price":priceMap[pricing],"rating":ratingsMap[rating]}
except KeyError as e:
response = {"status":"Failed","error":e,"results":[]}
return response
url = "https://play.google.com/store/search?"
try:
if proxies:
htmlresponse = requests.get(url,params = payload,proxies=proxies)
else:
htmlresponse = requests.get(url,params = payload)
htmlresponse = BeautifulSoup(htmlresponse.text, 'html.parser')
contents = htmlresponse.find_all("div",class_="id-card-list" )[0].findAll("div", { "class" : "card-content id-track-click id-track-impression" })
for content in contents:
result = {}
result["name"] = content.find("a", {"class":"title"})["title"]
result["id"] = content.find("span", {"class":"preview-overlay-container"})["data-docid"]
result["developer"] = content.find("a", {"class":"subtitle"})["title"]
result["description"] = content.find("div", {"class":"description"}).getText()
try:
result["rating"] = content.find("div", {"class":"tiny-star star-rating-non-editable-container"})["aria-label"]
except TypeError:
result["rating"] = "Not rated"
result["price"] = content.findAll("span", {"class":"display-price"})[0].getText()
result["largeImageUrl"] = content.find("img", {"class":"cover-image"})["data-cover-large"]
result["smallImageUrl"] = content.find("img", {"class":"cover-image"})["data-cover-small"]
response["results"].append(result)
return response
except Exception as e:
response["error"] = e
response["status"] = "Failed"
return response
def appdetails(identifier="",format="dict",proxies=None):
requests.packages.urllib3.disable_warnings()
response = {"status":"OK","error":None,"results":{}}
try:
payload = {'id': identifier}
except KeyError as e:
response = {"status":"Failed","error":e,"results":{}}
return response
url = "https://play.google.com/store/apps/details?"
try:
if proxies:
htmlresponse = requests.get(url,params = payload,proxies=proxies)
else:
htmlresponse = requests.get(url,params = payload)
result = {}
htmlresponse = BeautifulSoup(htmlresponse.text, 'html.parser')
contents = htmlresponse.find("div", { "class" : "details-wrapper apps square-cover id-track-partial-impression id-deep-link-item" })
result["id"] = contents["data-docid"]
result["name"] = contents.find("div",{"class":"document-title"}).getText()
price = contents.find("meta",{"itemprop":"price"})["content"].replace(u'\xa0',u' ')
result["price"] = price if price !='0' else 'Free'
result["category"] = contents.find("a",{"class":"document-subtitle category"}).getText()
result["developer"] = contents.find("a",{"class":"document-subtitle primary"}).getText()
result["description"] = contents.find("div",{"class":"show-more-content text-body"}).getText()
result["mediumImageUrl"] = contents.find("img",{"class":"cover-image"})['src']
try:
result["rating"] = contents.find("div", {"class":"tiny-star star-rating-non-editable-container"})["aria-label"]
except TypeError:
result["rating"] = "Not rated"
try:
res = contents.find("div",{"class":"thumbnails","data-expand-target":"thumbnails"}).findAll('img',{"class":"screenshot"})
result['screenshots'] =[]
for screenshot in res:
result['screenshots'].append(screenshot["src"])
except Exception,e:
result['screenshots'] =[]
try:
reviewContents = htmlresponse.find("div", { "class" : "details-wrapper apps" })
result["ratingValue"] = reviewContents.find("meta",{"itemprop":"ratingValue"})["content"]
reviewContent = reviewContents.find("div",{"class":"preview-reviews multicol"}).findAll("div",{"featured-review"})
result['review'] =[]
for review in reviewContent:
reviewData = {}
reviewData["author"] = review.find("span",{"class":"author-name"}).getText()
reviewData["title"] = review.find("span",{"class":"review-title"}).getText()
reviewData["text"] = review.find("div",{"class":"review-text"}).getText()
reviewData["rating"] = review.find("div",{"class":"tiny-star star-rating-non-editable-container"})["aria-label"]
reviewData["link"] = review.find("div",{"class":"author"}).find("a")["href"]
result["review"].append(reviewData)
except AttributeError:
result["ratingValue"] = None
result["review"] = []
except Exception, e:
print e
try:
additionalinfo = htmlresponse.find("div", { "class" : "details-section metadata" })
result["datePublished"] = additionalinfo.find("div",{"itemprop":"datePublished"}).getText()
result["fileSize"] = additionalinfo.find("div",{"itemprop":"fileSize"}).getText()
result["currentVersion"] = additionalinfo.find("div",{"itemprop":"softwareVersion"}).getText()
result["requiresAndroid"] = additionalinfo.find("div",{"itemprop":"operatingSystems"}).getText().strip()
except Exception ,e:
print e
response["results"] = result
except Exception,e:
response["error"] = e
response["status"] = "Failed"
response["results"] = result
return response
| Python | 0.000001 | @@ -597,36 +597,32 @@
turn response%0A%0A%0A
-
%0A url = %22http
@@ -839,24 +839,16 @@
ayload)%0A
-
%0A
|
808ce13f918cdda1b55a2baa73e81128c4f8fdd7 | Check file size during initial creation | bepasty/views/upload.py | bepasty/views/upload.py | # Copyright: 2013 Bastian Blank <[email protected]>
# License: BSD 2-clause, see LICENSE for details.
import os
import re
from flask import abort, current_app, jsonify, redirect, request, url_for
from flask.views import MethodView
from ..utils.html import ContentRange
from ..utils.name import ItemName
from . import blueprint
class Upload(object):
_filename_re = re.compile(r'[^a-zA-Z0-9 \*+:;.,_-]+')
_type_re = re.compile(r'[^a-z/-]+')
@classmethod
def filter_filename(cls, i):
"""
Filter filename.
Only allow some basic characters and shorten to 50 characters.
"""
return cls._filename_re.sub('', i)[:50]
@classmethod
def filter_type(cls, i):
"""
Filter Content-Type
Only allow some basic characters and shorten to 50 characters.
"""
if not i:
return 'application/octet-stream'
return cls._type_re.sub('', i)[:50]
@classmethod
def meta_new(cls, item, input_size, input_filename, input_type):
item.meta['complete'] = False
item.meta['filename'] = cls.filter_filename(input_filename)
item.meta['size'] = int(input_size)
item.meta['type'] = cls.filter_type(input_type)
@classmethod
def meta_complete(cls, item):
item.meta['complete'] = True
@staticmethod
def data(item, f, size_input, offset=0):
"""
Copy data from temp file into storage.
"""
read_length = 16*1024
size_written = 0
while True:
read_length = min(read_length, size_input)
if size_input == 0:
break
buf = f.read(read_length)
if not buf:
# Should not happen, we already checked the size
raise RuntimeError
item.data.write(buf, offset + size_written)
len_buf = len(buf)
size_written += len_buf
size_input -= len_buf
return size_written
class UploadView(MethodView):
def post(self):
f = request.files['file']
if not f:
raise NotImplementedError
# Check Content-Range, disallow its usage
if ContentRange.from_request():
abort(416)
# Check Content-Type, default to application/octet-stream
content_type = (
f.headers.get('Content-Type') or
request.headers.get('Content-Type'))
# Get size of temporary file
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(0)
# Create new name
name = ItemName.create()
with current_app.storage.create(name, size) as item:
Upload.data(item, f, size)
Upload.meta_new(item, size, f.filename, content_type)
Upload.meta_complete(item)
return redirect(url_for('bepasty.display', name=name))
class UploadNewView(MethodView):
def post(self):
data = request.get_json()
data_filename = data['filename']
data_size = int(data['size'])
data_type = data['type']
# Create new name
name = ItemName.create()
with current_app.storage.create(name, data_size) as item:
# Save meta-data
Upload.meta_new(item, data_size, data_filename, data_type)
return jsonify({'url': url_for('bepasty.upload_continue', name=name)})
class UploadContinueView(MethodView):
def post(self, name):
f = request.files['file']
if not f:
raise NotImplementedError
# Check Content-Range
content_range = ContentRange.from_request()
with current_app.storage.openwrite(name) as item:
if content_range:
Upload.data(item, f, content_range.size, content_range.begin)
if content_range.is_complete:
Upload.meta_complete(item)
else:
# Get size of temporary file
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(0)
Upload.data(item, f, size)
Upload.meta_complete(item)
return jsonify({'files': [{
'filename': item.meta['filename'],
'size': item.meta['size'],
'url': url_for('bepasty.display', name=name),
}]})
blueprint.add_url_rule('/+upload', view_func=UploadView.as_view('upload'))
blueprint.add_url_rule('/+upload/new', view_func=UploadNewView.as_view('upload_new'))
blueprint.add_url_rule('/+upload/<itemname:name>', view_func=UploadContinueView.as_view('upload_continue'))
| Python | 0 | @@ -449,16 +449,250 @@
/-%5D+')%0A%0A
+ @classmethod%0A def filter_size(cls, i):%0A %22%22%22%0A Filter size.%0A Check for advertised size.%0A %22%22%22%0A i = int(i)%0A if i %3E= 4 * 1024 * 1024 * 1024: # 4 GiB%0A abort(413)%0A return i%0A%0A
@cla
@@ -1400,19 +1400,31 @@
ize'%5D =
-int
+cls.filter_size
(input_s
|
093cec82b1c30129a2ac96c631acd8563fff9cf7 | Update blacklisted plugins | modules/sfp_tool_whatweb.py | modules/sfp_tool_whatweb.py | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_tool_whatweb
# Purpose: SpiderFoot plug-in for using the 'WhatWeb' tool.
# Tool: https://github.com/urbanadventurer/whatweb
#
# Author: <[email protected]>
#
# Created: 2019-08-31
# Copyright: (c) bcoles 2019
# Licence: GPL
# -------------------------------------------------------------------------------
from subprocess import Popen, PIPE
import io
import json
import os.path
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_tool_whatweb(SpiderFootPlugin):
"""Tool - WhatWeb:Footprint,Investigate:Content Analysis:tool:Identify what software is in use on the specified website."""
# Default options
opts = {
'aggression': 1,
'ruby_path': 'ruby',
'whatweb_path': ''
}
# Option descriptions
optdescs = {
'aggression': 'Set WhatWeb aggression level (1-4)',
'ruby_path': "Path to Ruby interpreter to use for WhatWeb. If just 'ruby' then it must be in your $PATH.",
'whatweb_path': "Path to the whatweb executable file. Must be set."
}
results = None
errorState = False
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = dict()
self.errorState = False
self.__dataSource__ = "Target Website"
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
def watchedEvents(self):
return ['INTERNET_NAME']
def producedEvents(self):
return ['RAW_RIR_DATA', 'WEBSERVER_BANNER', 'WEBSERVER_TECHNOLOGY']
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
if self.errorState:
return None
if eventData in self.results:
self.sf.debug("Skipping " + eventData + " as already scanned.")
return None
self.results[eventData] = True
if not self.opts['whatweb_path']:
self.sf.error("You enabled sfp_tool_whatweb but did not set a path to the tool!", False)
self.errorState = True
return None
exe = self.opts['whatweb_path']
if self.opts['whatweb_path'].endswith('/'):
exe = exe + 'whatweb'
# If tool is not found, abort
if not os.path.isfile(exe):
self.sf.error("File does not exist: " + exe, False)
self.errorState = True
return None
# Sanitize domain name.
if not self.sf.sanitiseInput(eventData):
self.sf.error("Invalid input, refusing to run.", False)
return None
# Set aggression level
try:
aggression = int(self.opts['aggression'])
if aggression > 4:
aggression = 4
if aggression < 1:
aggression = 1
except:
aggression = 1
# Run WhatWeb
args = [
self.opts['ruby_path'],
exe,
"--quiet",
"--aggression=" + str(aggression),
"--log-json=/dev/stdout",
"--user-agent=Mozilla/5.0",
"--follow-redirect=never",
eventData
]
try:
p = Popen(args, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(input=None)
except BaseException as e:
self.sf.error("Unable to run WhatWeb: " + str(e), False)
return None
if p.returncode != 0:
self.sf.error("Unable to read WhatWeb output.", False)
self.sf.debug("Error running WhatWeb: " + stderr + ", " + stdout)
return None
if not stdout:
self.sf.debug("WhatWeb returned no output for " + eventData)
return None
try:
result_json = json.loads(stdout)
except BaseException as e:
self.sf.error("Couldn't parse the JSON output of WhatWeb: " + str(e), False)
return None
evt = SpiderFootEvent('RAW_RIR_DATA', str(result_json), self.__name__, event)
self.notifyListeners(evt)
blacklist = [
'Country', 'IP',
'Script', 'Title',
'HTTPServer', 'RedirectLocation', 'UncommonHeaders', 'Via-Proxy', 'Cookies', 'HttpOnly',
'Strict-Transport-Security', 'X-Cache', 'X-UA-Compatible', 'X-Powered-By', 'X-Frame-Options', 'X-XSS-Protection'
]
for result in result_json:
plugin_matches = result.get('plugins')
if not plugin_matches:
continue
if plugin_matches.get('HTTPServer'):
for w in plugin_matches.get('HTTPServer').get('string'):
evt = SpiderFootEvent('WEBSERVER_BANNER', w, self.__name__, event)
self.notifyListeners(evt)
if plugin_matches.get('X-Powered-By'):
for w in plugin_matches.get('X-Powered-By').get('string'):
evt = SpiderFootEvent('WEBSERVER_TECHNOLOGY', w, self.__name__, event)
self.notifyListeners(evt)
for plugin in plugin_matches:
if plugin in blacklist:
continue
evt = SpiderFootEvent('SOFTWARE_USED', plugin, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_tool_whatweb class
| Python | 0.000001 | @@ -4518,17 +4518,81 @@
', '
-X-Cache',
+x-hacker', 'x-machine', 'x-pingback', 'X-Backend', 'X-Cache',%0A
'X-
@@ -4622,16 +4622,35 @@
red-By',
+ 'X-Forwarded-For',
'X-Fram
|
63e28ba358fefbe3b3b5389dc9dabf3284588270 | Revert "Just use ckminions in batch mode." | salt/cli/batch.py | salt/cli/batch.py | # -*- coding: utf-8 -*-
'''
Execute batch runs
'''
# Import python libs
from __future__ import print_function
import math
import time
import copy
# Import salt libs
import salt.client
import salt.output
import salt.utils.minions
from salt.utils import print_cli
class Batch(object):
'''
Manage the execution of batch runs
'''
def __init__(self, opts, eauth=None, quiet=False):
self.opts = opts
self.eauth = eauth if eauth else {}
self.quiet = quiet
self.local = salt.client.get_local_client(opts['conf_file'])
self.minions = self.__gather_minions()
def __gather_minions(self):
'''
Return a list of minions to use for the batch run
'''
ckminions = salt.utils.minions.CkMinions(self.opts)
selected_target_option = self.opts.get('selected_target_option', None)
if selected_target_option is not None:
expr_form = selected_target_option
else:
expr_form = self.opts.get('expr_form', 'glob')
return ckminions.check_minions(self.opts['tgt'], expr_form=expr_form)
def get_bnum(self):
'''
Return the active number of minions to maintain
'''
partition = lambda x: float(x) / 100.0 * len(self.minions)
try:
if '%' in self.opts['batch']:
res = partition(float(self.opts['batch'].strip('%')))
if res < 1:
return int(math.ceil(res))
else:
return int(res)
else:
return int(self.opts['batch'])
except ValueError:
if not self.quiet:
print_cli('Invalid batch data sent: {0}\nData must be in the '
'form of %10, 10% or 3'.format(self.opts['batch']))
def run(self):
'''
Execute the batch run
'''
args = [[],
self.opts['fun'],
self.opts['arg'],
self.opts['timeout'],
'list',
]
bnum = self.get_bnum()
to_run = copy.deepcopy(self.minions)
active = []
ret = {}
iters = []
# the minion tracker keeps track of responses and iterators
# - it removes finished iterators from iters[]
        #   - if a previously detected minion does not respond, it's
# added with an empty answer to ret{} once the timeout is reached
# - unresponsive minions are removed from active[] to make
# sure that the main while loop finishes even with unresp minions
minion_tracker = {}
# Iterate while we still have things to execute
while len(ret) < len(self.minions):
next_ = []
if len(to_run) <= bnum and not active:
# last bit of them, add them all to next iterator
while to_run:
next_.append(to_run.pop())
else:
for i in range(bnum - len(active)):
if to_run:
next_.append(to_run.pop())
active += next_
args[0] = next_
if next_:
if not self.quiet:
print_cli('\nExecuting run on {0}\n'.format(next_))
# create a new iterator for this batch of minions
new_iter = self.local.cmd_iter_no_block(
*args,
raw=self.opts.get('raw', False),
ret=self.opts.get('return', ''),
**self.eauth)
# add it to our iterators and to the minion_tracker
iters.append(new_iter)
minion_tracker[new_iter] = {}
# every iterator added is 'active' and has its set of minions
minion_tracker[new_iter]['minions'] = next_
minion_tracker[new_iter]['active'] = True
else:
time.sleep(0.02)
parts = {}
for queue in iters:
try:
# Gather returns until we get to the bottom
ncnt = 0
while True:
part = next(queue)
if part is None:
time.sleep(0.01)
ncnt += 1
if ncnt > 5:
break
continue
if self.opts.get('raw'):
parts.update({part['id']: part})
else:
parts.update(part)
except StopIteration:
                # if an iterator is done:
# - set it to inactive
# - add minions that have not responded to parts{}
# check if the tracker contains the iterator
if queue in minion_tracker:
minion_tracker[queue]['active'] = False
# add all minions that belong to this iterator and
# that have not responded to parts{} with an empty response
for minion in minion_tracker[queue]['minions']:
if minion not in parts:
parts[minion] = {}
parts[minion]['ret'] = {}
for minion, data in parts.items():
active.remove(minion)
if self.opts.get('raw'):
yield data
else:
ret[minion] = data['ret']
yield {minion: data['ret']}
if not self.quiet:
ret[minion] = data['ret']
data[minion] = data.pop('ret')
if 'out' in data:
out = data.pop('out')
else:
out = None
salt.output.display_output(
data,
out,
self.opts)
# remove inactive iterators from the iters list
for queue in minion_tracker:
# only remove inactive queues
if not minion_tracker[queue]['active'] and queue in iters:
iters.remove(queue)
# also remove the iterator's minions from the active list
for minion in minion_tracker[queue]['minions']:
if minion in active:
active.remove(minion)
| Python | 0 | @@ -202,34 +202,8 @@
put%0A
-import salt.utils.minions%0A
from
@@ -703,59 +703,139 @@
-ckminions = salt.utils.minions.CkMinions(self.opts)
+args = %5Bself.opts%5B'tgt'%5D,%0A 'test.ping',%0A %5B%5D,%0A self.opts%5B'timeout'%5D,%0A %5D%0A
%0A
@@ -969,28 +969,28 @@
-expr_form =
+args.append(
selected
@@ -1003,16 +1003,17 @@
t_option
+)
%0A
@@ -1035,20 +1035,20 @@
-expr_form =
+args.append(
self
@@ -1077,16 +1077,18 @@
'glob')
+)%0A
%0A
@@ -1092,76 +1092,330 @@
+f
ret
-urn ckminions.check_minions(self.opts%5B'tgt'%5D, expr_form=expr_form
+ = %5B%5D%0A for ret in self.local.cmd_iter(*args, **self.eauth):%0A for minion in ret:%0A if not self.quiet:%0A print_cli('%7B0%7D Detected for this batch run'.format(minion))%0A fret.append(minion)%0A # Returns %3Ctype 'list'%3E%0A return sorted(frozenset(fret)
)%0A%0A
|
6f0676877f5c23c0e6d04422cb8365e16958eb82 | document potential for streaming | camerav4.py | camerav4.py | import picamera
from picamera import PiCamera
import time
from datetime import datetime
import os.path
from subprocess32 import Popen
print "\nSecurity Camera Logger v3 | Ben Broce & William Hampton\n\n"
print "Streams video to vids/vidstream.h264 | Captures to pics/[timestamp].jpg"
print "Ctrl-C quits.\n\n"
stream = raw_input("Should I stream video (y/n)? ")
length = float(raw_input("How long should I run (in minutes): "))*60
interval = float(raw_input("How often should I take a picture (in seconds): "))
print "Running..."
if stream == "y":
Popen(["./livestream.sh"])
camera = PiCamera()
camera.annotate_background = picamera.Color('black')
camera.rotation = 180
camera.resolution = (640, 480)
counter = 0
try:
camera.start_preview()
while (counter <= length):
timestamp = datetime.now().strftime("%m-%d-%Y_%H:%M:%S")
camera.annotate_text = timestamp
path = '/var/www/PiCamServer/pics/' + timestamp + '.jpg'
camera.capture(path, use_video_port=True)
time.sleep(interval)
counter += interval
finally:
print "Exiting..."
camera.stop_preview()
| Python | 0 | @@ -527,16 +527,272 @@
ng...%22%0A%0A
+#http://www.raspberry-projects.com/pi/pi-hardware/raspberry-pi-camera/streaming-video-using-vlc-player%0A#http://www.diveintopython.net/scripts_and_streams/stdin_stdout_stderr.html%0A#Ouput video (record) =%3E stream =%3E stdout =%3E %7C =%3E cvlc livestream =%3E browser%0A
if strea
|
6cce83bfcaf2072f57f15fb8d05e89c6d8ef56b6 | load from credentials.oracle, not credentials | database.py | database.py | #!/usr/bin/python
import MySQLdb, datetime, httplib, json, os
class mysql_database:
def __init__(self, host, user, password, db):
self.connection = MySQLdb.connect(host, user, password, db)
self.cursor = self.connection.cursor()
def execute(self, query):
try:
self.cursor.execute(query)
self.connection.commit()
except:
self.connection.rollback()
raise
def query(self, query):
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(query)
return cursor.fetchall()
def __del__(self):
self.connection.close()
class oracle_apex_database:
def __init__(self, path, host = "apex.oracle.com"):
self.host = host
self.path = path
self.conn = httplib.HTTPSConnection(self.host)
self.credentials = None
credentials_file = os.path.join(os.path.dirname(__file__), "credentials")
if os.path.isfile(credentials_file):
f = open(credentials_file, "r")
self.credentials = json.load(f)
f.close()
for key, value in self.credentials.items(): #remove whitespace
self.credentials[key] = value.strip()
else:
print "credentials file not found"
self.default_data = { "Content-type": "text/plain", "Accept": "text/plain" }
def upload(self, id, ambient_temperature, ground_temperature, air_quality, air_pressure, humidity, wind_direction, wind_speed, wind_gust_speed, rainfall, created):
        #keys must follow the names expected by the Oracle Apex REST service
oracle_data = {
"LOCAL_ID": str(id),
"AMB_TEMP": str(ambient_temperature),
"GND_TEMP": str(ground_temperature),
"AIR_QUALITY": str(air_quality),
"AIR_PRESSURE": str(air_pressure),
"HUMIDITY": str(humidity),
"WIND_DIRECTION": str(wind_direction),
"WIND_SPEED": str(wind_speed),
"WIND_GUST_SPEED": str(wind_gust_speed),
"RAINFALL": str(rainfall),
"READING_TIMESTAMP": str(created) }
for key in oracle_data.keys():
if oracle_data[key] == str(None):
del oracle_data[key]
return self.https_post(oracle_data)
def https_post(self, data, attempts = 3):
attempt = 0
headers = dict(self.default_data.items() + self.credentials.items() + data.items())
success = False
response_data = None
while not success and attempt < attempts:
try:
self.conn.request("POST", self.path, None, headers)
response = self.conn.getresponse()
response_data = response.read()
print response.status, response.reason, response_data
success = response.status == 200 or response.status == 201
except Exception as e:
print "Unexpected error", e
finally:
attempt += 1
return response_data if success else None
def __del__(self):
self.conn.close()
class weather_database:
def __init__(self):
self.db = mysql_database("localhost", "root", "raspberry", "weather")
self.insert_template = "INSERT INTO WEATHER_MEASUREMENT (AMBIENT_TEMPERATURE, GROUND_TEMPERATURE, AIR_QUALITY, AIR_PRESSURE, HUMIDITY, WIND_DIRECTION, WIND_SPEED, WIND_GUST_SPEED, RAINFALL, CREATED) VALUES({0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, '{9}');"
self.update_template = "UPDATE WEATHER_MEASUREMENT SET REMOTE_ID={0} WHERE ID={1};"
self.upload_select_template = "SELECT * FROM WEATHER_MEASUREMENT WHERE REMOTE_ID IS NULL;"
def is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def is_none(self, val):
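        # Despite the name, this returns the value unchanged, or the SQL literal "NULL" when it is None.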
return val if val != None else "NULL"
def insert(self, ambient_temperature, ground_temperature, air_quality, air_pressure, humidity, wind_direction, wind_speed, wind_gust_speed, rainfall, created = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")):
insert_query = self.insert_template.format(
self.is_none(ambient_temperature),
self.is_none(ground_temperature),
self.is_none(air_quality),
self.is_none(air_pressure),
self.is_none(humidity),
self.is_none(wind_direction),
self.is_none(wind_speed),
self.is_none(wind_gust_speed),
self.is_none(rainfall),
created)
print insert_query
self.db.execute(insert_query)
def upload(self):
results = self.db.query(self.upload_select_template)
rows_count = len(results)
if rows_count > 0:
print rows_count, "rows to send..."
odb = oracle_apex_database(path = "/pls/apex/jeffsalleh/weatherstation/submitmeasurement")
if odb.credentials == None:
return #cannot upload
for row in results:
response_data = odb.upload(
row["ID"],
row["AMBIENT_TEMPERATURE"],
row["GROUND_TEMPERATURE"],
row["AIR_QUALITY"],
row["AIR_PRESSURE"],
row["HUMIDITY"],
row["WIND_DIRECTION"],
row["WIND_SPEED"],
row["WIND_GUST_SPEED"],
row["RAINFALL"],
row["CREATED"].strftime("%Y-%m-%dT%H:%M:%S"))
if response_data != None and response_data != "-1":
json_dict = json.loads(response_data)
oracle_id = json_dict["ORCL_RECORD_ID"]
if self.is_number(oracle_id):
local_id = str(row["ID"])
update_query = self.update_template.format(oracle_id, local_id)
self.db.execute(update_query)
print "ID:", local_id, "updated with REMOTE_ID =", oracle_id
else:
print "Bad response from Oracle"
else:
print "Nothing to upload"
| Python | 0.000019 | @@ -953,16 +953,23 @@
dentials
+.oracle
%22)%0A
|
37e2813d2b972100e7a562fe99cde72c99f5a544 | Add handler to create token when a user is created | popit/signals/handlers.py | popit/signals/handlers.py | # TODO: Implement for each
from django.db.models.signals import pre_delete
from django.db.models.signals import post_save
from django.dispatch import receiver
from popit.models import Person
from popit.models import Organization
from popit.models import Membership
from popit.models import Post
from popit.serializers import PersonSerializer
from popit.serializers import OrganizationSerializer
from popit.serializers import MembershipSerializer
from popit.serializers import PostSerializer
from popit_search.utils import search
@receiver(post_save, sender=Person)
def person_save_handler(sender, instance, created, raw, using, update_fields, **kwargs):
if raw:
return
language_code = instance.language_code
id = instance.id
query = "id:%s AND language_code:%s" % (id, language_code)
indexer = search.SerializerSearch("person")
check = indexer.search(query, language=language_code)
if not check:
indexer.add(instance, PersonSerializer)
else:
indexer.update(instance, PersonSerializer)
# using the pre_delete event is a good idea: it ensures the data still exists before it is removed from the indexer
@receiver(pre_delete, sender=Person)
def person_delete_handler(sender, instance, using, **kwargs):
indexer = search.SerializerSearch("person")
indexer.delete(instance)
@receiver(post_save, sender=Organization)
def organization_save_handler(sender, instance, created, raw, using, update_fields, **kwargs):
if raw:
return
language_code = instance.language_code
id = instance.id
query = "id:%s AND language_code:%s" % (id, language_code)
indexer = search.SerializerSearch("organization")
check = indexer.search(query, language=language_code)
if not check:
indexer.add(instance, OrganizationSerializer)
else:
indexer.update(instance, OrganizationSerializer)
@receiver(pre_delete, sender=Organization)
def organization_delete_handler(sender, instance, using, **kwargs):
indexer = search.SerializerSearch("organization")
indexer.delete(instance)
@receiver(post_save, sender=Membership)
def membership_save_handler(sender, instance, created, raw, using, update_fields, **kwargs):
if raw:
return
language_code = instance.language_code
id = instance.id
query = "id:%s AND language_code:%s" % (id, language_code)
indexer = search.SerializerSearch("membership")
check = indexer.search(query, language=language_code)
if not check:
indexer.add(instance, MembershipSerializer)
else:
indexer.update(instance, MembershipSerializer)
@receiver(pre_delete, sender=Membership)
def membership_delete_handler(sender, instance, using, **kwargs):
indexer = search.SerializerSearch("membership")
indexer.delete(instance)
@receiver(post_save, sender=Post)
def post_save_handler(sender, instance, created, raw, using, update_fields, **kwargs):
if raw:
return
language_code = instance.language_code
id = instance.id
query = "id:%s AND language_code:%s" % (id, language_code)
indexer = search.SerializerSearch("post")
check = indexer.search(query, language=language_code)
if not check:
indexer.add(instance, PostSerializer)
else:
indexer.update(instance, PostSerializer)
@receiver(pre_delete, sender=Post)
def post_delete_handler(sender, instance, using, **kwargs):
indexer = search.SerializerSearch("post")
indexer.delete(instance) | Python | 0 | @@ -1,31 +1,4 @@
-# TODO: Implement for each%0A
from
@@ -125,16 +125,60 @@
eceiver%0A
+from django.contrib.auth.models import User%0A
from pop
@@ -539,16 +539,66 @@
search%0A
+from rest_framework.authtoken.models import Token%0A
%0A%0A@recei
@@ -3501,28 +3501,195 @@
indexer.delete(instance)
+%0A%0A%0A@receiver(post_save, sender=User)%0Adef create_auth_token(sender, instance=None, created=False, **kwargs):%0A if created:%0A Token.objects.create(user=instance)
|
28d27274336ca4c84d732418b86292fe0f8b2634 | Update pseudocolor.py | plantcv/plantcv/pseudocolor.py | plantcv/plantcv/pseudocolor.py | # Pseudocolor any grayscale image
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import *
from plantcv.plantcv import params
from plantcv.plantcv import plot_image
from plantcv.plantcv import fatal_error
def pseudocolor(gray_img, mask=None, cmap=None, background="image", min_value=0, max_value=255, obj=None, dpi=None,
axes=True, colorbar=True, path="."):
"""Pseudocolor any grayscale image to custom colormap
Inputs:
gray_img = grayscale image data
mask = binary mask
cmap = colormap
background = background color/type. Options are "image" (gray_img), "white", or "black".
A mask must be supplied
min_value = minimum value for range of interest
max_value = maximum value for range of interest
obj = if provided, the pseudocolored image gets cropped down to the region of interest
dpi = dots per inch
axes = if False then x- and y-axis won't be displayed, nor will the title
colorbar = if False then colorbar won't be displayed
path = path for location for saving the image
Returns:
pseudo_image = pseudocolored image
:param gray_img: numpy.ndarray
:param mask: numpy.ndarray
:param cmap: str
:param background: str
:param min_value: int
:param max_value: int
:param obj: numpy.ndarray
:param dpi: int
:param axes: bool
:param path: str
:return pseudo_image: numpy.ndarray
"""
# Auto-increment the device counter
params.device += 1
# Check if the image is grayscale
if len(np.shape(gray_img)) != 2:
fatal_error("Image must be grayscale.")
# if max != 255:
# # Any pixels above the max_value set to the max value
# gray_img[gray_img > max_value] = max_value
# if min_value != 0:
# # Any pixels below min_value set to the min_value value
# gray_img[gray_img < min_value] = min_value
# Apply the mask if given
if mask is not None:
if obj is not None:
# Copy the image
img_copy = np.copy(gray_img)
# Extract contour size
x, y, w, h = cv2.boundingRect(obj)
cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 5)
# Crop down the image
crop_img = gray_img[y:y + h, x:x + w]
# Calculate the buffer size based on the contour size
offsetx = int(w / 5)
offsety = int(h / 5)
# copyMakeBorder will make a black or white frame around the image
if background == "image":
# gray_img = gray_img[y:y + h + (2*offsety), x:x + w + (2*offsetx)]
gray_img = gray_img[y - offsety:y + h + offsety, x - offsetx:x + w + offsetx]
else:
# Crop img including buffer
gray_img = cv2.copyMakeBorder(crop_img, offsety, offsety, offsetx, offsetx, cv2.BORDER_CONSTANT,
value=(0, 0, 0))
# Crop the mask to the same size
crop_mask = mask[y:y + h, x:x + w]
mask = cv2.copyMakeBorder(crop_mask, offsety, offsety, offsetx, offsetx, cv2.BORDER_CONSTANT,
value=(0, 0, 0))
# Apply the mask
masked_img = np.ma.array(gray_img, mask=~mask.astype(np.bool))
# Set the background color or type
if background == "black":
# Background is all zeros
bkg_img = np.zeros(np.shape(gray_img), dtype=np.uint8)
# Use the gray cmap for the background
bkg_cmap = "gray"
elif background == "white":
# Background is all 255 (white)
bkg_img = np.zeros(np.shape(gray_img), dtype=np.uint8)
bkg_img += 255
# Use the reverse gray cmap for the background
bkg_cmap = "gray_r"
elif background == "image":
            # Set the background to the input gray image
bkg_img = gray_img
# Use the gray cmap for the background
bkg_cmap = "gray"
else:
fatal_error(
"Background type {0} is not supported. Please use 'white', 'black', or 'image'.".format(background))
# Pseudocolor the image
# Plot the background first
pseudo_img1 = plt.imshow(bkg_img, cmap=bkg_cmap)
# Overlay the masked grayscale image with the user input colormap
plt.imshow(masked_img, cmap=cmap, vmin=min_value, vmax=max_value)
if colorbar:
# Include the colorbar
plt.colorbar(fraction=0.033, pad=0.04)
if axes:
# Include image title
plt.title('Pseudocolored image') # + os.path.splitext(filename)[0])
else:
# Remove axes
plt.xticks([])
plt.yticks([])
# Store the current figure
pseudo_img = plt.gcf()
# Print or plot if debug is turned on
if params.debug == 'print':
plt.savefig(os.path.join(path, str(params.device) + '_pseudocolored.png'), dpi=dpi)
plt.close()
elif params.debug == 'plot':
plot_image(pseudo_img1)
# Use non-blocking mode in case the function is run more than once
plt.show(block=False)
elif params.debug == None:
plt.clf()
plt.close()
else:
# Pseudocolor the image
pseudo_img1 = plt.imshow(gray_img, cmap=cmap, vmin=min_value, vmax=max_value)
# Include image title
plt.title('Pseudocolored image') # + os.path.splitext(filename)[0])
if colorbar:
# Include the colorbar
plt.colorbar(fraction=0.033, pad=0.04)
if axes:
# Include image title
plt.title('Pseudocolored image') # + os.path.splitext(filename)[0])
else:
# Remove axes
plt.xticks([])
plt.yticks([])
pseudo_img = plt.gcf()
# Print or plot if debug is turned on
if params.debug == 'print':
plt.savefig(os.path.join(path, str(params.device) + '_pseudocolored.png'), dpi=dpi)
pseudo_img.clear()
plt.close()
elif params.debug == 'plot':
plot_image(pseudo_img1)
# Use non-blocking mode in case the function is run more than once
plt.show(block=False)
elif params.debug == None:
plt.clf()
plt.close()
return pseudo_img
| Python | 0.000002 | @@ -1341,35 +1341,39 @@
aram min_value:
-int
+numeric
%0A :param max_
@@ -1379,19 +1379,23 @@
_value:
-int
+numeric
%0A :pa
|
9b237bc260d4f78ddc97b8d678d36076d22178e6 | use custom vm | datastax.py | datastax.py | import yaml
import base64
import json
def GenerateFirewall(context):
name = context.env['deployment'] + '-opscenterfirewall-' + context.env['name']
firewalls = [
{
'name': name,
'type': 'compute.v1.firewall',
'properties': {
'sourceRanges': [
'0.0.0.0/0'
],
'allowed': [{
'IPProtocol': 'tcp',
'ports': ['8888', '8443']
}]
}
}
]
return firewalls
def GenerateReferencesList(context):
reference_list = []
n_of_copies = context.properties['nodesPerZone']
dep_name = context.env['deployment']
for zone in context.properties['zones']:
for idx in range(1, n_of_copies + 1):
node_name = '$(ref.' + dep_name + '-' + zone + '-' + str(idx) + '-vm' + '.selfLink)'
reference_list.append(node_name)
return ' '.join(reference_list)
def GenerateConfig(context):
config = {'resources': []}
zonal_clusters = {
'name': 'clusters-' + context.env['name'],
'type': 'regional_multi_vm.py',
'properties': {
'sourceImage': 'https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20160314',
'zones': context.properties['zones'],
'machineType': context.properties['machineType'],
'network': 'default',
'numberOfVMReplicas': context.properties['nodesPerZone'],
'disks': [
{
'deviceName': 'vm-test-data-disk',
'type': 'PERSISTENT',
'boot': 'false',
'autoDelete': 'true',
'initializeParams': {
'diskType': 'pd-ssd',
'diskSizeGb': context.properties['diskSize']
}
}
],
'bootDiskType': 'pd-standard',
'metadata': {
'items': [
{
'key': 'startup-script',
'value': '''
#!/bin/bash
mkdir /mnt
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" /dev/disk/by-id/google-${HOSTNAME}-test-data-disk /mnt
chmod 777 /mnt
echo "Installing Java"
apt-get update
apt-get -y install openjdk-7-jre-headless
'''
}
]
}
}
}
ops_center_script = '''
#! /bin/bash
ssh-keygen -b 2048 -t rsa -f /tmp/sshkey -q -N ""
echo -n 'root:' | cat - /tmp/sshkey.pub > temp && mv temp /tmp/sshkey.pub
gcloud compute project-info add-metadata --metadata-from-file sshKeys=/tmp/sshkey.pub
echo "Installing Java"
apt-get update
apt-get -y install openjdk-7-jre-headless
echo "Installing OpsCenter"
echo "deb http://debian.datastax.com/community stable main" | tee -a /etc/apt/sources.list.d/datastax.community.list
curl -L http://debian.datastax.com/debian/repo_key | apt-key add -
apt-get update
apt-get -y install opscenter=5.2.4
echo "Starting OpsCenter"
sudo service opscenterd start
echo "Waiting for OpsCenter to start..."
sleep 15
echo "Waiting for Java to install on nodes..."
sleep 120
wget https://raw.githubusercontent.com/DSPN/google-cloud-platform-dse/master/provision/opsCenter.py
echo "Generating a provision.json file"
python opsCenter.py '''
# parameters go here
ops_center_script += context.env['deployment'] + ' '
ops_center_script += base64.b64encode(json.dumps(context.properties['zones'])) + ' '
ops_center_script += str(context.properties['nodesPerZone']) + ' '
ops_center_script += str(context.properties['nodeType'])
ops_center_script += '''
echo "Provisioning a new cluster using provision.json"
curl --insecure -H "Accept: application/json" -X POST http://127.0.0.1:8888/provision -d @provision.json
'''
ops_center_node = {
'name': context.env['deployment'] + '-opscenter-' + context.env['name'],
'type': 'vm_instance.py',
'properties': {
'sourceImage': 'https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20160314',
'zone': context.properties['opsCenterZone'],
'machineType': context.properties['machineType'],
'network': 'default',
'bootDiskType': 'pd-standard',
'serviceAccounts': [{
'email': 'default',
'scopes': ['https://www.googleapis.com/auth/compute']
}],
'metadata': {
'items': [
{
'key': 'startup-script',
'value': ops_center_script
},
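                    # The $(ref ...) values below are not used directly; they make Deployment
                    # Manager create the cluster VMs before this OpsCenter instance.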
{
'key': 'bogus-references',
'value': GenerateReferencesList(context)
}
]
}
}
}
config['resources'].append(zonal_clusters)
config['resources'].append(ops_center_node)
config['resources'].extend(GenerateFirewall(context))
return yaml.dump(config)
| Python | 0 | @@ -1243,39 +1243,39 @@
v1/projects/
-ubuntu-os-cloud
+datastax-public
/global/imag
@@ -1281,36 +1281,40 @@
ges/
+datastax-
ubuntu
--
1404-
-trusty-v20160314
+img-03172016
',%0A
@@ -4544,23 +4544,23 @@
cts/
-ubuntu-os-cloud
+datastax-public
/glo
@@ -4574,36 +4574,40 @@
ges/
+datastax-
ubuntu
--
1404-
-trusty-v20160314
+img-03172016
',%0A
|
5f46c92ccaeb273e0d3422b65f983aaf327e8f5a | drop more unused code | datastax.py | datastax.py | import yaml
import base64
import json
def GenerateFirewall(context):
name = 'opscenterfirewall-' + context.env['name']
firewalls = [
{
'name': name,
'type': 'compute.v1.firewall',
'properties': {
'sourceRanges': [
'0.0.0.0/0'
],
'allowed': [{
'IPProtocol': 'tcp',
'ports': ['8888', '8443']
}]
}
}
]
return firewalls
def GenerateReferencesList(context):
reference_list = []
n_of_copies = context.properties['nodesPerZone']
dep_name = context.env['deployment']
for zone in context.properties['zones']:
for idx in range(1, n_of_copies + 1):
node_name = '$(ref.' + dep_name + '-service-' + zone + '-' + str(idx) + '-vm' + '.selfLink)'
reference_list.append(node_name)
return ' '.join(reference_list)
def GenerateConfig(context):
config = {'resources': []}
#seed_nodes_dns_names = ''
#for zone in context.properties['zones']:
# seed_nodes_dns_names += context.env['deployment'] + '-service-' + zone + '-1-vm,'
#seed_nodes_dns_names = seed_nodes_dns_names[:-1]
# just going to do one seed for now
seed_nodes_dns_names = context.env['deployment'] + '-service-' + context.properties['zones'][0] + '-1-vm.c.' + context.env['project'] + '.internal'
dse_node_script = '''
#!/usr/bin/env bash
wget https://github.com/DSPN/install-datastax/archive/master.zip
apt-get -y install unzip
unzip master.zip
cd install-datastax-master/bin
cloud_type="google"
zone=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/zone" | grep -o [[:alnum:]-]*$)
data_center_name=$zone
seed_nodes_dns_names=''' + seed_nodes_dns_names + '''
echo "Configuring nodes with the settings:"
echo cloud_type $cloud_type
echo data_center_name $data_center_name
echo seed_nodes_dns_names $seed_nodes_dns_names
./dse.sh $cloud_type $seed_nodes_dns_names $data_center_name
'''
zonal_clusters = {
'name': 'clusters-' + context.env['name'],
'type': 'regional_multi_vm.py',
'properties': {
'sourceImage': 'https://www.googleapis.com/compute/v1/projects/datastax-public/global/images/datastax',
'zones': context.properties['zones'],
'machineType': context.properties['machineType'],
'network': 'default',
'numberOfVMReplicas': context.properties['nodesPerZone'],
'disks': [
{
'deviceName': 'vm-test-data-disk',
'type': 'PERSISTENT',
'boot': 'false',
'autoDelete': 'true',
'initializeParams': {
'diskType': 'pd-ssd',
'diskSizeGb': context.properties['diskSize']
}
}
],
'bootDiskType': 'pd-standard',
'metadata': {
'items': [
{
'key': 'startup-script',
'value': dse_node_script
}
]
}
}
}
ops_center_script = '''
#!/usr/bin/env bash
wget https://github.com/DSPN/install-datastax/archive/master.zip
apt-get -y install unzip
unzip master.zip
cd install-datastax-master/bin
seed_nodes_dns_names=''' + seed_nodes_dns_names + '''
echo "Configuring nodes with the settings:"
echo seed_nodes_dns_names $seed_nodes_dns_names
cloud_type="google"
./opscenter.sh $cloud_type $seed_nodes_dns_names
'''
ops_center_node = {
'name': 'opscenter-' + context.env['name'],
'type': 'vm_instance.py',
'properties': {
'sourceImage': 'https://www.googleapis.com/compute/v1/projects/datastax-public/global/images/datastax',
'zone': context.properties['opsCenterZone'],
'machineType': context.properties['machineType'],
'network': 'default',
'bootDiskType': 'pd-standard',
'serviceAccounts': [{
'email': 'default',
'scopes': ['https://www.googleapis.com/auth/compute']
}],
'metadata': {
'items': [
{
'key': 'startup-script',
'value': ops_center_script
},
{
'key': 'bogus-references',
'value': GenerateReferencesList(context)
}
]
}
}
}
config['resources'].append(zonal_clusters)
config['resources'].append(ops_center_node)
config['resources'].extend(GenerateFirewall(context))
return yaml.dump(config)
| Python | 0 | @@ -9,34 +9,8 @@
aml%0A
-import base64%0Aimport json%0A
%0A%0Ade
@@ -991,271 +991,8 @@
%5D%7D%0A%0A
- #seed_nodes_dns_names = ''%0A #for zone in context.properties%5B'zones'%5D:%0A # seed_nodes_dns_names += context.env%5B'deployment'%5D + '-service-' + zone + '-1-vm,'%0A #seed_nodes_dns_names = seed_nodes_dns_names%5B:-1%5D%0A%0A # just going to do one seed for now%0A
@@ -1724,56 +1724,8 @@
ype%0A
- echo data_center_name $data_center_name%0A
@@ -1772,24 +1772,72 @@
s_dns_names%0A
+ echo data_center_name $data_center_name%0A
./ds
@@ -3306,24 +3306,50 @@
master/bin%0A%0A
+ cloud_type=%22google%22%0A
seed_n
@@ -3442,24 +3442,58 @@
settings:%22%0A
+ echo cloud_type $cloud_type%0A
echo s
@@ -3538,34 +3538,8 @@
mes%0A
- cloud_type=%22google%22%0A
|
48a30aade7e606e671db44e8ee69092c0e67b363 | Complete lc051_n_queens.py | lc051_n_queens.py | lc051_n_queens.py | """Leetcode 51. N-Queens.
Hard.
URL: https://leetcode.com/problems/n-queens/
The n-queens puzzle is the problem of placing n queens on an nxn chessboard
such that no two queens attack each other.
Given an integer n, return all distinct solutions to the n-queens puzzle.
Each solution contains a distinct board configuration of the n-queens' placement,
where 'Q' and '.' both indicate a queen and an empty space respectively.
Example:
Input: 4
Output: [
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
Explanation: There exist two distinct solutions to the 4-queens puzzle
as shown above.
"""
class Solution(object):
def _is_valid(self, queens):
"""Check current queen position is valid."""
current_row, current_col = len(queens) - 1, queens[-1]
# Check any queens can attack the current queen.
for row, col in enumerate(queens[:-1]):
col_diff = abs(current_col - col)
row_diff = abs(current_row - row)
if col_diff == 0 or col_diff == row_diff:
return False
return True
def solveNQueens(self, n, res, queens):
"""
:type n: int
:rtype: List[List[str]]
"""
# queens is an 1-d array to store the column ids of queens.
if n == len(queens):
res.append(queens[:])
return res
for col in range(n):
# Append current queen's column id.
queens.append(col)
if self._is_valid(queens):
# If current queen's position is valid, search the next level.
self.solveNQueens(n, res, queens)
# Backtrack by poping out current queens.
queens.pop()
return res
def main():
n = 4
print Solution().solveNQueens(n, [], [])
if __name__ == '__main__':
main()
| Python | 0.99916 | @@ -764,16 +764,38 @@
is valid
+ among previous queens
.%22%22%22%0A
@@ -1165,27 +1165,19 @@
def
-solveNQueen
+_df
s(self,
@@ -1182,24 +1182,27 @@
, n, res
+=%5B%5D
, queens
):%0A
@@ -1185,32 +1185,35 @@
, res=%5B%5D, queens
+=%5B%5D
):%0A %22%22%22%0A
@@ -1214,141 +1214,55 @@
%22%22%22
-%0A :type n: int%0A :rtype: List%5BList%5Bstr%5D%5D%0A %22%22%22%0A # queens is an 1-d array to store the column ids of queens.
+DFS for putting queens in suitable position.%22%22%22
%0A
@@ -1340,19 +1340,20 @@
return
-res
+None
%0A%0A
@@ -1595,27 +1595,19 @@
self.
-solveNQueen
+_df
s(n, res
@@ -1668,17 +1668,16 @@
nt queen
-s
.%0A
@@ -1699,35 +1699,530 @@
p()%0A
+%0A
- %0A return res
+def solveNQueens(self, n):%0A %22%22%22%0A :type n: int%0A :rtype: List%5BList%5Bstr%5D%5D%0A%0A Time complexity: O(n!).%0A Space complexity: O(n).%0A %22%22%22%0A # res to collect multiple solutions for n_queens.%0A res = %5B%5D%0A # queens is an 1-d array to store the column ids of queens.%0A queens = %5B%5D%0A%0A self._dfs(n, res, queens)%0A%0A # Make solution configs.%0A sol = %5B%5B'.'*j + 'Q' + '.'*(n - j - 1) for j in queens%5D %0A for queens in res%5D%0A return sol
%0A%0A%0Ad
@@ -2281,16 +2281,8 @@
ns(n
-, %5B%5D, %5B%5D
)%0A%0A%0A
|
16c1ae09e0288036aae87eb4337c24b23b1e6638 | Clean up some unused imports and comments | classify.py | classify.py | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
import numpy as np
import json
import sys
import time
from analyze import Analyzer # for some train data labelling
def main(argv):
group = argv[0] if len(argv) > 0 else "id"
train_size = int(argv[1]) if len(argv) > 1 else 1000
train_data = []
train_labels = []
analyzer = Analyzer(group)
for message, _ in analyzer.read_json(sys.stdin):
label = analyzer.analyze(message)[0]
train_data.append(message)
train_labels.append(label)
if len(train_data) >= train_size:
break
regressor = Pipeline([
('tfidf', TfidfVectorizer(input='content')),
('clf', RandomForestRegressor())
#('clf', KNeighborsRegressor())
])
regressor.fit(train_data, train_labels)
for message, group in analyzer.read_json(sys.stdin):
# Call predict for every message which might be slow in practice but
# avoids memory hog due to not being able to use iterators if done in
# one batch.
prediction = regressor.predict([message])[0]
if analyzer.display:
# Take the color for this group of predictions
c = cmp(prediction, 0)
message = analyzer.colors[c] + message + analyzer.END_COLOR
analyzer.output(group, message, prediction, "")
if __name__ == "__main__":
main(sys.argv[1:])
| Python | 0 | @@ -57,58 +57,8 @@
zer%0A
-from sklearn.neighbors import KNeighborsRegressor%0A
from
@@ -188,20 +188,8 @@
sys%0A
-import time%0A
from
@@ -795,48 +795,8 @@
())%0A
- #('clf', KNeighborsRegressor())%0A
|
4f3d1e90ec4af618ada415f53ddd9eec42bafb38 | Indent with 4 spaces, not 3 | wafer/talks/tests/test_wafer_basic_talks.py | wafer/talks/tests/test_wafer_basic_talks.py | # This tests the very basic talk stuff, to ensure some levels of sanity
def test_add_talk():
"""Create a user and add a talk to it"""
from django.contrib.auth.models import User
from wafer.talks.models import Talks
user = User.objects.create_user('john', '[email protected]', 'johnpassword')
talk = Talks.objects.create(title="This is a test talk",
abstract="This should be a long and interesting abstract, but isn't",
corresponding_author_id=user.id)
assert user.contact_talks.count() == 1
| Python | 0.998158 | @@ -91,16 +91,17 @@
k():%0A
+
%22%22%22Creat
@@ -133,16 +133,17 @@
o it%22%22%22%0A
+
from
@@ -184,16 +184,17 @@
User%0A
+
from waf
@@ -223,16 +223,17 @@
Talks%0A%0A
+
user
@@ -305,16 +305,17 @@
rd')%0A
+
talk = T
@@ -363,16 +363,17 @@
talk%22,%0A
+
@@ -452,16 +452,17 @@
+
correspo
@@ -487,16 +487,17 @@
er.id)%0A%0A
+
asser
|
82f648557d3c14568811038a63d766d3d84a9b79 | Fix dataverse upload bug | waterbutler/providers/dataverse/provider.py | waterbutler/providers/dataverse/provider.py | import asyncio
import http
import tempfile
import xmltodict
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.providers.dataverse import settings
from waterbutler.providers.dataverse.metadata import DataverseDatasetMetadata
from waterbutler.providers.dataverse import utils as dataverse_utils
class DataverseProvider(provider.BaseProvider):
EDIT_MEDIA_BASE_URL = settings.EDIT_MEDIA_BASE_URL
DOWN_BASE_URL = settings.DOWN_BASE_URL
METADATA_BASE_URL = settings.METADATA_BASE_URL
def __init__(self, auth, credentials, settings):
super().__init__(auth, credentials, settings)
self.token = self.credentials['token']
self.doi = self.settings['doi']
@asyncio.coroutine
def download(self, path, **kwargs):
resp = yield from self.make_request(
'GET',
provider.build_url(self.DOWN_BASE_URL, path),
expects=(200, ),
throws=exceptions.DownloadError,
params={'key': self.token},
)
return streams.ResponseStreamReader(resp)
@asyncio.coroutine
def upload(self, stream, path, **kwargs):
filename = path.strip('/')
stream = streams.ZipStreamReader(
filename=filename,
file_stream=stream,
)
# Write stream to disk (Necessary to find zip file size)
f = tempfile.TemporaryFile()
chunk = yield from stream.read()
while chunk:
f.write(chunk)
chunk = yield from stream.read()
stream = streams.FileStreamReader(f)
dv_headers = {
"Content-Disposition": "filename=temp.zip",
"Content-Type": "application/zip",
"Packaging": "http://purl.org/net/sword/package/SimpleZip",
"Content-Length": str(stream.size),
}
yield from self.make_request(
'POST',
provider.build_url(self.EDIT_MEDIA_BASE_URL, 'study', self.doi),
headers=dv_headers,
auth=(self.token, ),
data=stream,
expects=(201, ),
throws=exceptions.UploadError
)
# Find appropriate version of file from metadata url
data = yield from self.metadata()
filename, version = dataverse_utils.unpack_filename(filename)
highest_compatible = None
# Reduce to files of the same base name of the same/higher version
filtered_data = sorted([
f for f in data
if f['extra']['original'] == filename
and f['extra']['version'] >= version
], key=lambda f: f['extra']['version'])
# Find highest version from original without a gap in between
for item in filtered_data:
if item['extra']['version'] == version:
highest_compatible = item
version += 1
else:
break
return highest_compatible, True
@asyncio.coroutine
def delete(self, path, **kwargs):
yield from self.make_request(
'DELETE',
provider.build_url(self.EDIT_MEDIA_BASE_URL, 'file', path),
auth=(self.token, ),
expects=(204, ),
throws=exceptions.DeleteError,
)
@asyncio.coroutine
def metadata(self, path, state='draft', **kwargs):
url = provider.build_url(self.METADATA_BASE_URL, self.doi)
resp = yield from self.make_request(
'GET',
url,
auth=(self.token, ),
expects=(200, ),
throws=exceptions.MetadataError
)
data = yield from resp.text()
data = xmltodict.parse(data)
dataset_metadata = DataverseDatasetMetadata(data, state).serialized()
# Dataset metadata
if path == '/':
return dataset_metadata
# File metadata
else:
try:
return next(
item for item in dataset_metadata if item['path'] == path
)
except StopIteration:
raise exceptions.MetadataError(
"Could not retrieve file '{}'".format(path),
code=http.client.NOT_FOUND,
)
| Python | 0.000001 | @@ -3356,16 +3356,20 @@
lf, path
+='/'
, state=
|
7b0a6d27389f8e4abde77b2ed76dac795c33cfab | Use url_for | demo/app.py | demo/app.py | import flask
from flask import request
from markupsafe import Markup
import diffhtml
app = flask.Flask('Diff-HTML Demo')
DEFAULT_A = """
I am the very model of a modern Major-General,
I've information vegetable, animal, and mineral,
I know the kings of England, and I quote the fights historical,
From Marathon to Waterloo, in order categorical.
"""
DEFAULT_B = """
I am the very model of a cartoon individual,
My animation's comical, unusual, and whimsical,
I'm quite adept at funny gags, comedic theory I have read,
From wicked puns and stupid jokes to anvils that drop on your head.
"""
@app.route('/ndiff', methods=['GET', 'POST'])
def ndiff():
a = request.form.get('a', DEFAULT_A)
b = request.form.get('b', DEFAULT_B)
try:
cutoff = float(request.form.get('cutoff', 0.6))
except ValueError:
cutoff = 0.6
context = {
'result': None,
'cutoff': cutoff,
'input': {'a': a, 'b': b},
}
if request.method == 'POST':
context['result'] = Markup('<br>').join(diffhtml.ndiff(
a.splitlines(), b.splitlines(), cutoff=cutoff,
))
return flask.render_template('ndiff.html', **context)
@app.route('/')
def home():
return flask.redirect('/ndiff')
if __name__ == '__main__':
app.run()
| Python | 0.000354 | @@ -1234,17 +1234,31 @@
ect(
+flask.url_for(
'
-/
ndiff')
+)
%0A%0A%0Ai
|
dd249ca665f21f574d9ff992e0cd3e78433c7fa7 | Use printf-style String Formatting for output | sanic/response.py | sanic/response.py | import ujson
STATUS_CODES = {
200: 'OK',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
}
class HTTPResponse:
__slots__ = ('body', 'status', 'content_type', 'headers')
def __init__(self, body=None, status=200, headers=[], content_type='text/plain', body_bytes=b''):
self.content_type = content_type
if not body is None:
self.body = body.encode('utf-8')
else:
self.body = body_bytes
self.status = status
self.headers = headers
def output(self, version="1.1", keep_alive=False, keep_alive_timeout=None):
# This is all returned in a kind-of funky way
# We tried to make this as fast as possible in pure python
additional_headers = []
if keep_alive and not keep_alive_timeout is None:
additional_headers = [b'Keep-Alive: timeout=', str(keep_alive_timeout).encode(), b's\r\n']
if self.headers:
for name, value in self.headers.items():
additional_headers.append('{}: {}\r\n'.format(name, value).encode('utf-8'))
return b''.join([
'HTTP/{} {} {}\r\n'.format(version, self.status,
STATUS_CODES.get(self.status, 'FAIL')).encode(),
b'Content-Type: ', self.content_type.encode(), b'\r\n',
b'Content-Length: ', str(len(self.body)).encode(), b'\r\n',
b'Connection: ', ('keep-alive' if keep_alive else 'close').encode(), b'\r\n',
] + additional_headers + [
b'\r\n',
self.body,
])
def json(body, status=200, headers=None):
return HTTPResponse(ujson.dumps(body), headers=headers, status=status,
content_type="application/json; charset=utf-8")
def text(body, status=200, headers=None):
return HTTPResponse(body, status=status, headers=headers, content_type="text/plain; charset=utf-8")
def html(body, status=200, headers=None):
return HTTPResponse(body, status=status, headers=headers, content_type="text/html; charset=utf-8")
| Python | 0.000006 | @@ -36,16 +36,17 @@
200:
+b
'OK',%0D%0A
@@ -53,16 +53,17 @@
400:
+b
'Bad Req
@@ -79,16 +79,17 @@
401:
+b
'Unautho
@@ -106,16 +106,17 @@
402:
+b
'Payment
@@ -137,16 +137,17 @@
403:
+b
'Forbidd
@@ -161,16 +161,17 @@
404:
+b
'Not Fou
@@ -185,16 +185,17 @@
405:
+b
'Method
@@ -218,16 +218,17 @@
500:
+b
'Interna
@@ -254,16 +254,17 @@
501:
+b
'Not Imp
@@ -284,16 +284,17 @@
502:
+b
'Bad Gat
@@ -310,16 +310,17 @@
503:
+b
'Service
@@ -344,16 +344,17 @@
504:
+b
'Gateway
@@ -1014,31 +1014,28 @@
-additional
+timeout
_header
-s
=
-%5B%5D
+b''
%0D%0A
@@ -1058,20 +1058,16 @@
ive and
-not
keep_ali
@@ -1076,24 +1076,16 @@
_timeout
- is None
:%0D%0A
@@ -1095,30 +1095,25 @@
-additional
+timeout
_header
-s
=
-%5B
b'Ke
@@ -1130,23 +1130,26 @@
timeout=
-', str(
+%25d%5Cr%5Cn' %25
keep_ali
@@ -1162,185 +1162,129 @@
eout
-).encode(), b's%5Cr%5Cn'%5D%0D%0A if self.headers:%0D%0A for name, value in self.headers.items():%0D%0A additional_headers.append('%7B%7D: %7B%7D%5Cr%5Cn'.format(name
+%0D%0A%0D%0A headers = b''%0D%0A if self.headers:%0D%0A headers = b''.join(b'%25b: %25b%5Cr%5Cn' %25 (name.encode()
, value
-)
.enc
@@ -1300,160 +1300,219 @@
8'))
-%0D%0A%0D%0A return b''.join(%5B%0D%0A 'HTTP/%7B%7D %7B%7D %7B%7D%5Cr%5Cn'.format(version, self.status,%0D%0A
+ for name, value in self.headers.items())%0D%0A return b'HTTP/%25b %25d %25b%5Cr%5CnContent-Type: %25b%5Cr%5CnContent-Length: %25d%5Cr%5CnConnection: %25b%5Cr%5Cn%25b%25b%5Cr%5Cn%25b' %25 (%0D%0A version.encode(),%0D%0A self.status,%0D%0A
@@ -1553,248 +1553,102 @@
us,
+b
'FAIL')
-).encode(),%0D%0A b'Content-Type: ', self.content_type.encode(), b'%5Cr%5Cn',%0D%0A b'Content-Length: ', str(len(self.body)).encode(), b'%5Cr%5Cn',%0D%0A b'Connection: ', (
+,%0D%0A self.content_type.encode(),%0D%0A len(self.body),%0D%0A b
'kee
@@ -1679,143 +1679,70 @@
lse
+b
'close'
-).encode(), b'%5Cr%5Cn',%0D%0A %5D + additional_headers + %5B%0D%0A b'%5Cr%5Cn',%0D%0A
+,%0D%0A timeout_header,%0D%0A headers,%0D%0A
@@ -1758,17 +1758,16 @@
elf.body
-,
%0D%0A
@@ -1772,28 +1772,9 @@
- %5D)%0D%0A
+)
%0D%0A%0D%0A
|
6662067dd688a61417252712b4f8cbdf8da74798 | Version bump 0.1.29 | swf/__init__.py | swf/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
version = (0, 1, 28)
__title__ = "python-simple-workflow"
__author__ = "Oleiade"
__license__ = "MIT"
__version__ = '.'.join(map(str, version))
| Python | 0 | @@ -58,17 +58,17 @@
(0, 1, 2
-8
+9
)%0A%0A__tit
|
68b2e1cb5a914d408761229bd27677e80967f5ff | Remove unused import. | hashbrown/management/commands/switches.py | hashbrown/management/commands/switches.py | from optparse import make_option
from django.core.management.base import BaseCommand
from django.utils.six.moves import input
from hashbrown.models import Switch
from hashbrown.utils import SETTINGS_KEY, is_active, get_defaults
class Command(BaseCommand):
help = 'Creates / deletes feature switches in the database'
def add_arguments(self, parser):
parser.add_argument(
'--delete',
action='store_true',
default=False,
help='Delete switches in the database that are not in ' + SETTINGS_KEY,
)
parser.add_argument(
'--force',
action='store_true',
default=False,
help='Delete switches without confirmation (implies --delete)',
)
def handle(self, *args, **kwargs):
if kwargs['delete'] or kwargs['force']:
self._delete_switches(force=kwargs['force'])
self._create_switches()
self.stderr.write('All switches up-to-date.')
def _create_switches(self):
create_switches(self.stderr)
def _delete_switches(self, force=False):
delete_switches(self.stderr, force=force)
def create_switches(stderr):
"""Create switches listed in HASHBROWN_SWITCH_DEFAULTS which aren't in
the database yet.
"""
defaults = get_defaults()
installed_switches = set(Switch.objects.values_list('label', flat=True))
missing_switches = set(defaults) - installed_switches
for label in sorted(missing_switches):
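        # Checking is_active() creates the missing switch in the database as a side effect.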
is_active(label)
stderr.write('Created switch %r.' % label)
return missing_switches
def delete_switches(stderr, force=False):
defaults = get_defaults()
installed_switches = set(Switch.objects.values_list('label', flat=True))
unknown_switches = sorted(installed_switches - set(defaults))
if not unknown_switches:
return
permission_granted = force or ask_permission(stderr, unknown_switches)
if permission_granted:
Switch.objects.filter(label__in=unknown_switches).delete()
for label in unknown_switches:
stderr.write('Deleted switch %r.' % label)
def ask_permission(stderr, switches):
stderr.write('The following switches are in the database but not in %s:' % SETTINGS_KEY)
for label in switches:
stderr.write(label)
response = input('Delete switches? [y/N]: ')
return response.lower().strip() in ('y', 'yes')
| Python | 0 | @@ -1,38 +1,4 @@
-from optparse import make_option%0A%0A
from
|
6876fc6339fe91ea21b573dc23cb989561f6192a | Use lazy util to refresh img-list in event model. | hhlevents/apps/hhlregistrations/models.py | hhlevents/apps/hhlregistrations/models.py | import datetime
import uuid
from glob import glob
from os.path import basename
from django.db import models
from django_markdown.models import MarkdownField
from django_markdown.fields import MarkdownFormField
from happenings.models import Event as HappeningsEvent
from happenings.utils.next_event import get_next_event
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _ # _lazy required
from datetime import date
from django.conf import settings
class Event(HappeningsEvent):
# Get all png-images from static/img/
IMAGES = ( ("/static/img/"+basename(x), basename(x))
for x in glob(settings.HHLREGISTRATIONS_ROOT+"/static/img/*.png")
)
# Options for registration requirements, also option for not accepting registrations
REG_REQUIREMENT = ( ('RQ', 'Required'),
('OP', 'Optional'),
('NO', 'None') )
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
extra_url = models.URLField(blank=True)
gforms_url = models.URLField(blank=True)
registration_requirement = models.CharField(max_length=2, choices=REG_REQUIREMENT)
max_registrations = models.PositiveSmallIntegerField(default=0)
close_registrations = models.DateTimeField(blank=True, null=True)
payment_due = models.DateTimeField(blank=True, null=True)
event_cost = models.PositiveSmallIntegerField(default=0)
materials_cost = models.PositiveSmallIntegerField(default=0)
materials_mandatory = models.BooleanField(default=False)
hide_join_checkbox = models.BooleanField(default=False)
image = models.CharField(max_length=100, choices=IMAGES, default=0)
def formLink(self):
tag = '<a href="' + reverse('registrations:register', args=[str(self.id)]) + '">Form</a>'
if self.registration_requirement in ('OP', 'NO'):
# in italics if registration is optional
tag = '<i>(' + tag + ')</i>'
return tag
formLink.allow_tags = True
formLink.short_description = _('Form link')
def getParticipants(self):
return Registration.objects.all().filter(event = self.event).order_by('state', 'registered')
def getStatsHTML(self):
n_AC = Registration.objects.all().filter(event = self.event).filter(state = 'AC').count()
n_CC = Registration.objects.all().filter(event = self.event).filter(state = 'CC').count()
n_CP = Registration.objects.all().filter(event = self.event).filter(state = 'CP').count()
n_WL = Registration.objects.all().filter(event = self.event).filter(state = 'WL').count()
n_CA = Registration.objects.all().filter(event = self.event).filter(state = 'CA').count()
n_CR = Registration.objects.all().filter(event = self.event).filter(state = 'CR').count()
n_WB = Registration.objects.all().filter(event = self.event).filter(state = 'WB').count()
return u'Assumed coming (AC): %s<br/>Confirmed coming (CC): %s</br>Confirmed, pre-payments OK (CP): %s<br/>Waiting-list (WL): %s<br/>Cancelled (CA): %s</br>Cancelled, refunded (CR): %s<br/>Waiting-list (due to ban) (WB): %s' % (n_AC, n_CC, n_CP, n_WL, n_CA, n_CR, n_WB)
class Meta:
ordering = ["-end_date"]
verbose_name = _('event')
verbose_name_plural = _('events')
def isPast(self):
if self.repeats('NEVER') and timezone.now() > self.end_date:
return True
elif not self.repeats('NEVER') and self.end_repeat < self.end_date.date():
# Error state, handle somehow differently later on
return False
elif not self.repeats('NEVER') and self.end_repeat <= timezone.now().date():
return True
return False
def isCancelled(self):
if self.check_if_cancelled(timezone.now()):
return True
return False
def isRepeating(self):
if self.repeats('NEVER'):
return False
return True
def getNextEvent(self): # next occurrence of this happening
if self.repeats('NEVER'):
return self.start_date
elif self.end_repeat > timezone.now().date():
next = get_next_event([self], timezone.now())
pvm = date(next[0], next[1], next[2])
return pvm
# in case repetition has ended, show nothing
return None
class Person(models.Model):
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
first_name = models.CharField(max_length=150)
last_name = models.CharField(max_length=150)
email = models.EmailField(unique=True)
banned = models.DateTimeField(blank=True, null=True, verbose_name=u'Automatically put to waiting list')
def __unicode__(self):
return self.formatted_email
@property
def formatted_email(self):
return u'%s, %s <%s>' % (self.last_name, self.first_name, self.email)
class Meta:
ordering = ["last_name"]
verbose_name = _('participant')
verbose_name_plural = _('participants')
class Registration(models.Model):
STATES = (
( 'AC', 'Assumed coming'),
( 'CC', 'Confirmed coming'),
( 'CP', 'Confirmed, pre-payments OK'),
( 'WL', 'Waiting-list'),
( 'CA', 'Cancelled'),
( 'CR', 'Cancelled, refunded'),
( 'WB', 'Waiting-list (due to ban)'),
)
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
paid = models.DateTimeField(blank=True, null=True)
event = models.ForeignKey(Event, related_name='persons', on_delete=models.CASCADE)
person = models.ForeignKey(Person, related_name='events', on_delete=models.CASCADE)
registered = models.DateTimeField(default=datetime.datetime.now)
cancelled = models.DateTimeField(blank=True, null=True)
state = models.CharField(max_length=2, choices=STATES)
wants_materials = models.BooleanField(default=False)
    # when did they register? queueing?
class Meta:
unique_together = (('event', 'person'),)
ordering = ["event"]
verbose_name = _('registration')
verbose_name_plural = _('registration')
def __unicode__(self):
return u'%s, %s <%s> (%s)' % (self.person.last_name, self.person.first_name, self.person.email, self.state)
| Python | 0 | @@ -465,16 +465,57 @@
equired%0A
+from django.utils.functional import lazy%0A
from dat
@@ -570,52 +570,36 @@
gs%0A%0A
-%0A
-class Event(HappeningsEvent):%0A # Get all
+# Always get a fresh list of
png
@@ -627,22 +627,34 @@
mg/%0A
-
+def
IMAGES
- = (
+():%0A return %5B
(%22/
@@ -678,17 +678,16 @@
name(x),
-
basename
@@ -690,30 +690,16 @@
name(x))
-%0A
for x i
@@ -760,23 +760,41 @@
ng%22)
-%0A )
+ %5D%0A%0Aclass Event(HappeningsEvent):
%0A
@@ -1757,16 +1757,21 @@
ces=
+lazy(
IMAGES,
defa
@@ -1770,21 +1770,17 @@
ES,
-default=0)
+tuple)())
%0A
|
ac85a3ce64ea815fd3530085c085e384cf8269fb | Use pydeconz interface controls for button platform (#74654) | homeassistant/components/deconz/button.py | homeassistant/components/deconz/button.py | """Support for deCONZ buttons."""
from __future__ import annotations
from dataclasses import dataclass
from pydeconz.models.event import EventType
from pydeconz.models.scene import Scene as PydeconzScene
from homeassistant.components.button import (
DOMAIN,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .deconz_device import DeconzSceneMixin
from .gateway import DeconzGateway, get_gateway_from_config_entry
@dataclass
class DeconzButtonDescriptionMixin:
"""Required values when describing deCONZ button entities."""
suffix: str
button_fn: str
@dataclass
class DeconzButtonDescription(ButtonEntityDescription, DeconzButtonDescriptionMixin):
"""Class describing deCONZ button entities."""
ENTITY_DESCRIPTIONS = {
PydeconzScene: [
DeconzButtonDescription(
key="store",
button_fn="store",
suffix="Store Current Scene",
icon="mdi:inbox-arrow-down",
entity_category=EntityCategory.CONFIG,
)
]
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the deCONZ button entity."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_scene(_: EventType, scene_id: str) -> None:
"""Add scene button from deCONZ."""
scene = gateway.api.scenes[scene_id]
async_add_entities(
DeconzButton(scene, gateway, description)
for description in ENTITY_DESCRIPTIONS.get(PydeconzScene, [])
)
gateway.register_platform_add_device_callback(
async_add_scene,
gateway.api.scenes,
)
class DeconzButton(DeconzSceneMixin, ButtonEntity):
"""Representation of a deCONZ button entity."""
TYPE = DOMAIN
def __init__(
self,
device: PydeconzScene,
gateway: DeconzGateway,
description: DeconzButtonDescription,
) -> None:
"""Initialize deCONZ number entity."""
self.entity_description: DeconzButtonDescription = description
super().__init__(device, gateway)
self._attr_name = f"{self._attr_name} {description.suffix}"
async def async_press(self) -> None:
"""Store light states into scene."""
async_button_fn = getattr(self._device, self.entity_description.button_fn)
await async_button_fn()
def get_device_identifier(self) -> str:
"""Return a unique identifier for this scene."""
return f"{super().get_device_identifier()}-{self.entity_description.key}"
| Python | 0 | @@ -2608,21 +2608,57 @@
ttr(
-self._device,
+%0A self.gateway.api.scenes,%0A
sel
@@ -2687,16 +2687,26 @@
utton_fn
+,%0A
)%0A
@@ -2729,16 +2729,54 @@
tton_fn(
+self._device.group_id, self._device.id
)%0A%0A d
|
8c657d4254a1f64f2b8efebef429d0e685fe0f26 | use the input stream codec as the template for the output streams (#22747) | homeassistant/components/stream/worker.py | homeassistant/components/stream/worker.py | """Provides the worker thread needed for processing streams."""
from fractions import Fraction
import io
import logging
from .const import AUDIO_SAMPLE_RATE
from .core import Segment, StreamBuffer
_LOGGER = logging.getLogger(__name__)
def generate_audio_frame():
"""Generate a blank audio frame."""
from av import AudioFrame
audio_frame = AudioFrame(format='dbl', layout='mono', samples=1024)
# audio_bytes = b''.join(b'\x00\x00\x00\x00\x00\x00\x00\x00'
# for i in range(0, 1024))
audio_bytes = b'\x00\x00\x00\x00\x00\x00\x00\x00' * 1024
audio_frame.planes[0].update(audio_bytes)
audio_frame.sample_rate = AUDIO_SAMPLE_RATE
audio_frame.time_base = Fraction(1, AUDIO_SAMPLE_RATE)
return audio_frame
def create_stream_buffer(stream_output, video_stream, audio_frame):
"""Create a new StreamBuffer."""
import av
a_packet = None
segment = io.BytesIO()
output = av.open(
segment, mode='w', format=stream_output.format)
vstream = output.add_stream(
stream_output.video_codec, video_stream.rate)
# Fix format
vstream.codec_context.format = \
video_stream.codec_context.format
# Check if audio is requested
astream = None
if stream_output.audio_codec:
astream = output.add_stream(
stream_output.audio_codec, AUDIO_SAMPLE_RATE)
# Need to do it multiple times for some reason
while not a_packet:
a_packets = astream.encode(audio_frame)
if a_packets:
a_packet = a_packets[0]
return (a_packet, StreamBuffer(segment, output, vstream, astream))
def stream_worker(hass, stream, quit_event):
"""Handle consuming streams."""
import av
container = av.open(stream.source, options=stream.options)
try:
video_stream = container.streams.video[0]
except (KeyError, IndexError):
_LOGGER.error("Stream has no video")
return
audio_frame = generate_audio_frame()
outputs = {}
first_packet = True
sequence = 1
audio_packets = {}
last_dts = None
while not quit_event.is_set():
try:
packet = next(container.demux(video_stream))
if packet.dts is None:
if first_packet:
continue
# If we get a "flushing" packet, the stream is done
raise StopIteration("No dts in packet")
except (av.AVError, StopIteration) as ex:
# End of stream, clear listeners and stop thread
for fmt, _ in outputs.items():
hass.loop.call_soon_threadsafe(
stream.outputs[fmt].put, None)
_LOGGER.error("Error demuxing stream: %s", str(ex))
break
# Skip non monotonically increasing dts in feed
if not first_packet and last_dts >= packet.dts:
continue
last_dts = packet.dts
# Reset segment on every keyframe
if packet.is_keyframe:
# Save segment to outputs
segment_duration = (packet.pts * packet.time_base) / sequence
for fmt, buffer in outputs.items():
buffer.output.close()
del audio_packets[buffer.astream]
if stream.outputs.get(fmt):
hass.loop.call_soon_threadsafe(
stream.outputs[fmt].put, Segment(
sequence, buffer.segment, segment_duration
))
# Clear outputs and increment sequence
outputs = {}
if not first_packet:
sequence += 1
# Initialize outputs
for stream_output in stream.outputs.values():
if video_stream.name != stream_output.video_codec:
continue
a_packet, buffer = create_stream_buffer(
stream_output, video_stream, audio_frame)
audio_packets[buffer.astream] = a_packet
outputs[stream_output.name] = buffer
# First video packet tends to have a weird dts/pts
if first_packet:
packet.dts = 0
packet.pts = 0
first_packet = False
# Store packets on each output
for buffer in outputs.values():
# Check if the format requires audio
if audio_packets.get(buffer.astream):
a_packet = audio_packets[buffer.astream]
a_time_base = a_packet.time_base
# Determine video start timestamp and duration
video_start = packet.pts * packet.time_base
video_duration = packet.duration * packet.time_base
if packet.is_keyframe:
# Set first audio packet in sequence to equal video pts
a_packet.pts = int(video_start / a_time_base)
a_packet.dts = int(video_start / a_time_base)
# Determine target end timestamp for audio
target_pts = int((video_start + video_duration) / a_time_base)
while a_packet.pts < target_pts:
# Mux audio packet and adjust points until target hit
buffer.output.mux(a_packet)
a_packet.pts += a_packet.duration
a_packet.dts += a_packet.duration
audio_packets[buffer.astream] = a_packet
# Assign the video packet to the new stream & mux
packet.stream = buffer.vstream
buffer.output.mux(packet)
| Python | 0 | @@ -1040,158 +1040,30 @@
eam(
-%0A stream_output.video_codec, video_stream.rate)%0A # Fix format%0A vstream.codec_context.format = %5C%0A video_stream.codec_context.format
+template=video_stream)
%0A
|
9b529f2ba00c4ee76b571ef1c27faada89e4bc29 | Add full compatibility functions. | alchy/_compat.py | alchy/_compat.py | #pylint: skip-file
'''Python 2/3 compatibility
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
Borrowed from https://github.com/mitsuhiko/flask/blob/master/flask/_compat.py
'''
import sys
PY2 = sys.version_info[0] == 2
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
# Certain versions of pypy have a bug where clearing the exception stack
# breaks the __exit__ function in a very peculiar way. This is currently
# true for pypy 2.2.1 for instance. The second level of exception blocks
# is necessary because pypy seems to forget to check if an exception
# happend until the next bytecode instruction?
BROKEN_PYPY_CTXMGR_EXIT = False
if hasattr(sys, 'pypy_version_info'):
class _Mgr(object):
def __enter__(self):
return self
def __exit__(self, *args):
sys.exc_clear()
try:
try:
with _Mgr():
raise AssertionError()
except:
raise
except TypeError:
BROKEN_PYPY_CTXMGR_EXIT = True
except AssertionError:
pass
| Python | 0 | @@ -316,16 +316,40 @@
0%5D == 2%0A
+_identity = lambda x: x%0A
%0A%0Aif not
@@ -555,16 +555,227 @@
tems())%0A
+%0A from io import StringIO%0A%0A def reraise(tp, value, tb=None):%0A if value.__traceback__ is not tb:%0A raise value.with_traceback(tb)%0A raise value%0A%0A implements_to_string = _identity%0A%0A
else:%0A
@@ -984,16 +984,277 @@
tems()%0A%0A
+ from cStringIO import StringIO%0A%0A exec('def reraise(tp, value, tb=None):%5Cn raise tp, value, tb')%0A%0A def implements_to_string(cls):%0A cls.__unicode__ = cls.__str__%0A cls.__str__ = lambda x: x.__unicode__().encode('utf-8')%0A return cls%0A%0A
%0Adef wit
|
b241f242b6eae6c2c19a6fe0336933c22998f262 | deal with negative RAs | bin/full_map_any_sky.py | bin/full_map_any_sky.py |
#!/usr/bin/env python
import sys
import os
import numpy as N
from pylab import *
import matplotlib.pyplot as P
import matplotlib.patches as mpatches
def readmulticolumn(f,names,types):
for line in f:
line=line.strip()
if line.startswith('#') or len(line)==0:
continue
items=line.split()
for j in range(len(names)):
names[j].append(types[j](items[j]))
return
ra=list()
dec=list()
av_tfs1=list()
av_tfs2=list()
newfig=list()
names=[ra,dec,av_tfs1,av_tfs2]
types=[float,float,int,int]
tiling=input("Standard tiling=0 Randomized tiling=1 ")
if(tiling==0):
fstandard=open('/project/projectdirs/desi/mocks/preliminary/new_random_map.txt','r')
else:
fstandard=open('/project/projectdirs/desi/mocks/preliminary/random_tiles_new_random_map.txt','r')
readmulticolumn(fstandard,names,types)
mra=N.array(ra)
mdec=N.array(dec)
mav_tfs1=N.array(av_tfs1)
mav_tfs2=N.array(av_tfs2)
while (1==1):
ra_min=input("ra_min = ")
ra_max=input("ra_max = ")
dec_min=input("dec_min = ")
dec_max=input("dec_max = ")
spotsize=input("spotsize = ")
ra_center=(ra_min+ra_max)/2.
dec_center=(dec_min+dec_max)/2.
overlap=input("Don't include overlap of positioners=0 Do include overlap of positioners")
if (ra_min>=0 & ra_max>=0):
ii=(mra<ra_max)&(mra>ra_min)&(mdec<dec_max)&(mdec>dec_min)
if (ra_min<0 & ra_max>=0):
ii=(mdec<dec_max)&(mdec>dec_min)&(mra<ra_max|mra>ra_min+360)
if (ra_min<0 & ra_max<=0):
ii=(360+mra<ra_max)&(360+mra>ra_min)&(mdec<dec_max)&(mdec>dec_min)
nra=mra[ii]
ndec=mdec[ii]
if(overlap==0):
nav_tfs=mav_tfs2[ii]
else:
nav_tfs=mav_tfs2[ii]
max_av_tfs=N.amax(nav_tfs)
min_av_tfs=N.amin(nav_tfs)
values=N.arange(min_av_tfs,max_av_tfs+1)
for i in range(100): print nra[i],ndec[i],nav_tfs[i]
print len(nra)
length=len(nra)
#convert to radians
convert=3.1415296/180
nra=nra*convert
ndec=ndec*convert
ra_center=ra_center*convert
dec_center=dec_center*convert
ndec_center=N.empty(length); ndec_center.fill(dec_center)
nra_center=N.empty(length); nra_center.fill(ra_center)
delta_ra=nra-nra_center
delta_dec=ndec-ndec_center
sindec=N.sin(ndec)
cosdec=N.cos(ndec)
sindeltar=N.sin(delta_ra)
cosdeltar=N.cos(delta_ra)
cosdzero=N.cos(dec_center)
sindzero=N.sin(dec_center)
a=cosdec*sindeltar
b=sindec*cosdzero-sindzero*cosdec*cosdeltar
c=sindzero*sindec+cosdzero*cosdec*cosdeltar
x=a/c
y=b/c
x=x/convert+ra_center/convert
y=y/convert+dec_center/convert
f,(ax1,ax2)=plt.subplots(1,2,figsize=(14,7))
P.subplot(1,2,1)
def cmap_discretize(cmap, N):
if type(cmap) == str:
cmap = get_cmap(cmap)
colors_i = concatenate((linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
indices = linspace(0, 1., N+1)
cdict = {}
for ki,key in enumerate(('red','green','blue')):
cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ]
# Return colormap object.
return matplotlib.colors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
cmap=cmap_discretize('jet',max_av_tfs-min_av_tfs+1)
P.scatter(x,y,c=nav_tfs-min_av_tfs,s=spotsize,edgecolor='none',cmap=cmap)
P.axis("scaled")
P.xlim(ra_min,ra_max)
P.ylim(dec_min,dec_max)
P.gca().invert_xaxis()
P.xlabel("RA",fontsize=16)
P.ylabel("dec",fontsize=16)
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array([])
mappable.set_clim(min_av_tfs-0.5,max_av_tfs+0.5)
bar=P.colorbar(mappable)
bar.set_ticks(N.linspace(min_av_tfs,max_av_tfs,len(values)))
bar.set_ticklabels(values)
P.subplot(1,2,2)
counts=N.zeros(15)
for i in range(15):
counts[i]=sum(nav_tfs==i)
norm_counts=counts/length
print norm_counts
nsq=nav_tfs*nav_tfs
summ=float(sum(nav_tfs))
mean=summ/float(length)
rms=(float(sum(nsq)-summ*summ/length))/length
print length, mean, rms
index=N.arange(15)
P.bar(index,norm_counts,align='center')
P.xlim([0,14])
P.text(8,0.2,"mean = {:10.3f}".format(mean))
P.text(8,0.17,"rms = {:10.3f}".format(rms))
P.xlabel("Fraction of Area Covered n Times",fontsize=16)
show()
| Python | 0.00002 | @@ -1470,9 +1470,12 @@
_max
-%7C
+ or
mra%3E
|
114d7bc6b45d18f528a7ed5c12e1938e35efb93c | Update hash_db_password.py | bin/hash_db_password.py | bin/hash_db_password.py | import sys
from werkzeug.security import generate_password_hash
from flask_appbuilder.security.models import User
try:
from app import app, db
except:
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
if len(sys.argv) < 2:
print "Without typical app structure use parameter to config"
print "Use example: python hash_db_password.py sqlite:////home/user/application/app.db"
exit()
con_str = sys.argv[1]
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = con_str
db = SQLAlchemy(app)
try:
print "using connection string: {}".format(app.config['SQLALCHEMY_DATABASE_URI'])
users = db.session.query(User).all()
except Exception as e:
print "Query, connection error {}".format(e.message)
print "Config db key {}".format(app.config['SQLALCHEMY_DATABASE_URI'])
exit()
for user in users:
print "Hashing password for {}".format(user.full_name)
user.password = generate_password_hash(user.password)
try:
db.session.merge(user)
db.session.commit()
except:
print "Error updating password for {}".format(user.full_name)
| Python | 0.000071 | @@ -771,16 +771,8 @@
at(e
-.message
)%0A
|
3224a95d79f6e3166e235f4cfc857a48d1b17c52 | Revise docstring: memoization | alg_fibonacci.py | alg_fibonacci.py | """Fibonacci series:
0, 1, 1, 2, 3, 5, 8,...
- Fib(0) = 0
- Fib(1) = 1
- Fib(n) = Fib(n - 1) + Fib(n - 2)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def fibonacci_recur(n):
"""Get the nth number of Fibonacci series, Fn, by recursion.
- Time complexity: 2Fn - 1 = O(Fn); too fast.
- Space complexity: O(n).
"""
if n <= 1:
return n
else:
return fibonacci_recur(n - 1) + fibonacci_recur(n - 2)
def fibonacci_memo(n):
"""Get the nth number of Fibonacci series, Fn, by memorization.
- Time complexity: O(n).
- Space complexity: O(n).
"""
fn_d = {}
fn_d[0] = 0
fn_d[1] = 1
for n in range(2, n + 1):
fn_d[n] = fn_d[n - 1] + fn_d[n - 2]
return fn_d[n]
def fibonacci_dp(n):
"""Get the nth number of Fibonacci series by dynamic programming.
- Time complexity is still O(n), like fibonacci_memo().
- Space complexity is O(1), improving a lot.
"""
a, b = 0, 1
for _ in range(n):
a, b = a + b, a
return a
def fibonacci_gen(n):
"""Get the nth number of Fibonacci series by generator."""
a, b = 0, 1
for _ in range(n):
a, b = a + b, a
yield a
def main():
import time
n = 30
print('{}th number of Fibonacci series:'.format(n))
start_time = time.time()
print('By recursion: {}'.format(fibonacci_recur(n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By memorization: {}'.format(fibonacci_memo(n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By dynamic programming: {}'.format(fibonacci_dp(n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By generator: {}'.format(list(fibonacci_gen(n))[-1]))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| Python | 0.998763 | @@ -580,25 +580,24 @@
Fn, by memo
-r
ization.%0A%0A
|
2ac49aacb0f9e7c15b48119bff13be45e9b3616d | Switch to compression to blosc as its faster than gzip | jax/experimental/gda_serialization/serialization.py | jax/experimental/gda_serialization/serialization.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GlobalDeviceArray serialization and deserialization."""
import asyncio
import re
from typing import Callable
import jax
from jax.experimental import global_device_array as gda
from jax.experimental.maps import Mesh
import jax.numpy as jnp
import numpy as np
import tensorstore as ts
async def create_async_gda_from_callback(
global_shape: gda.Shape,
global_mesh: Mesh,
mesh_axes: gda.MeshAxes,
data_callback: Callable[[gda.Index], asyncio.Future],
):
global_idx_rid = gda.get_shard_indices_replica_ids(
global_shape, global_mesh, mesh_axes)
local_devices = global_mesh.local_devices
future_arrays = [data_callback(global_idx_rid[d][0])
for d in local_devices]
# Pause here and come back to `from_async_callback()` when future_arrays are
# ready. device_put cannot happen with future_arrays.
local_arrays = await asyncio.gather(*future_arrays)
dbs = [jax.device_put(array, device)
for array, device in zip(local_arrays, local_devices)]
return gda.GlobalDeviceArray(global_shape, global_mesh, mesh_axes, dbs,
gda._GdaFastPathArgs(global_idx_rid, local_devices))
def _get_metadata(gda):
if gda.dtype == jnp.bfloat16:
# Tensorstore uses 'bfloat16', not '<V2'.
dtype = 'bfloat16'
else:
dtype = np.dtype(gda.dtype).str
return {
'compressor': {
'id': 'gzip'
},
'shape': gda.shape,
'chunks': np.array(gda.local_data(0).shape),
'dtype': dtype,
}
def get_tensorstore_spec(ckpt_path: str):
spec = {'driver': 'zarr', 'kvstore': {}}
if ckpt_path.startswith('gs://'):
m = re.fullmatch('^gs://([^/]*)/(.*)$', ckpt_path, re.DOTALL)
if m is None:
raise ValueError('The ckpt_path should contain the bucket name and the '
f'file path inside the bucket. Got: {ckpt_path}')
gcs_bucket = m.group(1)
path_without_bucket = m.group(2)
spec['kvstore'] = {'driver': 'gcs', 'bucket': gcs_bucket,
'path': path_without_bucket}
else:
spec['kvstore'] = {'driver': 'file', 'path': ckpt_path}
return spec
async def async_serialize(gda_inp: gda.GlobalDeviceArray, tensorstore_spec):
if not tensorstore_spec.get('metadata'):
tensorstore_spec['metadata'] = _get_metadata(gda_inp)
t = await ts.open(
ts.Spec(tensorstore_spec),
create=True,
open=True,
context=ts.Context({'file_io_concurrency': {
'limit': 128
}}))
async def _write_array(shard):
if shard.replica_id == 0:
await t[shard.index].write(shard.data)
future_write_state = jax.tree_util.tree_map(_write_array,
tuple(gda_inp.local_shards))
return await asyncio.gather(*future_write_state)
def run_serialization(gdas, tensorstore_specs):
async def _run_serializer():
future_writer = jax.tree_map(async_serialize, gdas, tensorstore_specs)
return await asyncio.gather(*future_writer)
asyncio.run(_run_serializer())
async def async_deserialize(mesh, mesh_axes, tensorstore_spec):
t = ts.open(ts.Spec(tensorstore_spec), open=True).result()
async def cb(index):
return await t[index].read()
return await create_async_gda_from_callback(t.shape, mesh, mesh_axes, cb)
def run_deserialization(global_meshes, mesh_axes, tensorstore_specs):
async def _run_deserializer():
future_gdas = jax.tree_map(async_deserialize, global_meshes, mesh_axes,
tensorstore_specs)
return await asyncio.gather(*future_gdas)
return asyncio.run(_run_deserializer())
| Python | 0 | @@ -1959,12 +1959,13 @@
': '
-gzip
+blosc
'%0A
|
858e84f336f76a1e65b730834ad8ffb346ee6b0f | fix logger singleton to work with pyjd | examples/mail/Logger.py | examples/mail/Logger.py | from pyjamas.ui.Grid import Grid
_logger = None
class Logger(Grid):
def __new__(cls):
global _logger
# make sure there is only one instance of this class
if _logger:
return _logger
_logger = Grid.__new__(cls)
return _logger
def __init__(self, target="", message=""):
#global _logger
if message:
return Logger().write(target, message)
# make sure there is only one instance of this class
if hasattr(self, target): return None
self.setSingleton()
Grid.__init__(self)
self.targets=[]
self.targets.append("app")
#self.targets.append("ui")
self.resize(len(self.targets)+1, 2)
self.setBorderWidth("1px")
self.counter=0
self.setHTML(0, 0, "<b>Log</b>")
self.setText(1, 0, "app")
for i in range(len(self.targets)):
target=self.targets[i]
self.setText(i+1, 0, target)
def setSingleton(self):
global _logger
_logger = self
def addTarget(self, target):
self.targets.append(target)
self.resize(len(self.targets)+1, 2)
self.setText(len(self.targets), 0, target)
return self.targets.index(target)
def write(self, target, message):
self.counter+=1
if target=='':
target='app'
try:
target_idx=self.targets.index(target)
except ValueError:
target_idx = -1
# add new target
if target_idx<0:
target_idx=self.addTarget(target)
target_row=target_idx+1
old_text=self.getHTML(target_row, 1)
log_line="%d: " % self.counter + message
if old_text==' ':
new_text=log_line
else:
new_text=old_text + "<br>" + log_line
self.setHTML(target_row, 1, new_text)
| Python | 0.000007 | @@ -59,16 +59,19 @@
gger
+Cls
(Grid):%0A
@@ -70,510 +70,32 @@
d):%0A
- def __new__(cls):%0A global _logger%0A # make sure there is only one instance of this class%0A if _logger:%0A return _logger%0A _logger = Grid.__new__(cls)%0A return _logger%0A%0A def __init__(self, target=%22%22, message=%22%22):%0A #global _logger%0A if message:%0A return Logger().write(target, message)%0A %0A # make sure there is only one instance of this class%0A if hasattr(self, target): return None%0A self.setSingleton()
+%0A def __init__(self):
%0A%0A
@@ -525,87 +525,8 @@
t)%0A%0A
- def setSingleton(self):%0A global _logger%0A _logger = self%0A %0A
@@ -1404,8 +1404,229 @@
text) %0A%0A
+def Logger(target=%22%22, message=%22%22):%0A global _logger%0A # make sure there is only one instance of this class%0A if not _logger:%0A _logger = LoggerCls()%0A%0A _logger.write(target, message)%0A %0A return _logger%0A
|
No community queries yet
The top public SQL queries from the community will appear here once available.