commit | subject | old_file | new_file | old_contents | lang | proba | diff
---|---|---|---|---|---|---|---|
6e43f611420068f0829fc64c1963ee51931b0099 | change name of data.py | node-interactions.py | node-interactions.py | Python | 0.000016 | @@ -0,0 +1,1420 @@
import operator
from os import listdir
from os.path import isfile, join
import sys

def get_dict_of_all_contacts():
    datapath = 'flu-data/moteFiles'
    datafiles = [f for f in listdir(datapath) if isfile(join(datapath,f)) ]
    dict_of_all_contacts = dict()
    for datafile in datafiles:
        node_contacts = dict()
        f = open(join(datapath,datafile), 'r')
        line = f.readline()
        while line:
            numlist = line.split()
            if len(numlist) < 5:
                continue
            node = numlist[0]
            time = int(numlist[-1])
            if node not in node_contacts:
                node_contacts[node] = time
            line = f.readline()
        nodename = datafile[5:]
        dict_of_all_contacts[nodename] = node_contacts
        f.close()
    return dict_of_all_contacts


dict_of_all_contacts = get_dict_of_all_contacts()


node1 = dict_of_all_contacts['1']


infected = {}
for k, v in node1.iteritems():
    infected[k] = v

final_infected = infected.copy()

for k,v in infected.iteritems():
    current_node = dict_of_all_contacts[k]
    for k, v in current_node.iteritems():
        if k not in infected:
            final_infected[k] = v
        else:
            if infected[k] > v:
                final_infected[k] = v

print len(final_infected)
sorted_infected = sorted(final_infected.iteritems(), key=operator.itemgetter(1))
print sorted_infected
0c6becaa179aba9408def1b3cce61d5ec1509942 | Load the simul module and run a simulation | python/main.py | python/main.py | Python | 0 | @@ -0,0 +1,429 @@
from simul import *

if __name__ == '__main__':
    # create a new simulation
    s = Simulation(Re=5)

    # initial conditions psi(0) = 0, Omega(0) = 0
    s.psi.initial("null")
    s.omega.initial("null")
    # T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz
    s.T.initial(lambda n, k: T_0(n,k,s))

    # main loop over time
    while s.step():
        s.T.step()
        s.psi.step()
        s.omega.step()

    del s
90169095a9e1adbc23e1efa35ea0e1a9a09259de | Solve Code Fights sortByHeight problem | Problems/sortByHeight.py | Problems/sortByHeight.py | Python | 0.999278 | @@ -0,0 +1,387 @@
#!/usr/local/bin/python
# Code Fights Arcade Mode


def sortByHeight(a):
    trees = [i for i, t in enumerate(a) if t == -1]
    humans = sorted([h for h in a if h != -1])
    for tree in trees:
        humans.insert(tree, -1)
    return humans


def main():
    a = [-1, 150, 190, 170, -1, -1, 160, 180]
    new = sortByHeight(a)
    print(new)


if __name__ == '__main__':
    main()
1a97d686ed5afd9a97083bc09f6c4bfb4ef124fc | Add quick helpers to get a client | helpers.py | helpers.py | Python | 0 | @@ -0,0 +1,518 @@
from zaqarclient.queues import client

import os

conf = {
    'auth_opts': {
        'backend': 'keystone',
        'options': {
            'os_username': os.environ.get('OS_USERNAME'),
            'os_password': os.environ.get('OS_PASSWORD'),
            'os_project_name': os.environ.get('OS_PROJECT_NAME', 'admin'),
            'os_auth_url': os.environ.get('OS_AUTH_URL') + '/v2.0/',
            'insecure': '',
        },
    },
}

client = client.Client(url='http://192.168.122.58:8888', version=2, conf=conf)
7aee3720617aa3442245e2d0bf3de7393e4acb01 | Add lc0133_clone_graph.py | lc0133_clone_graph.py | lc0133_clone_graph.py | Python | 0.000002 | @@ -0,0 +1,1464 @@
"""Leetcode 133. Clone Graph
Medium

URL: https://leetcode.com/problems/clone-graph/

Given a reference of a node in a connected undirected graph, return a deep copy
(clone) of the graph. Each node in the graph contains a val (int) and a list
(List[Node]) of its neighbors.

Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},
{"$id":"3","neighbors":[{"$ref":"2"},{"$id":"4","neighbors":[{"$ref":"3"},
{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.

Note:
- The number of nodes will be between 1 and 100.
- The undirected graph is a simple graph, which means no repeated edges and no
  self-loops in the graph.
- Since the graph is undirected, if node p has node q as neighbor, then node q
  must have node p as neighbor too.
- You must return the copy of the given node as a reference to the cloned graph.
"""

# Definition for a Node.
class Node(object):
    def __init__(self, val, neighbors):
        self.val = val
        self.neighbors = neighbors


class Solution(object):
    def cloneGraph(self, node):
        """
        :type node: Node
        :rtype: Node
        """
        pass


def main():
    pass


if __name__ == '__main__':
    main()
ef63c538aff066230030aaf02981933b652830e4 | Create module_posti.py | pyfibot/modules/module_posti.py | pyfibot/modules/module_posti.py | Python | 0.000001 | @@ -0,0 +1,2454 @@
# -*- encoding: utf-8 -*-
"""
Get package tracking information from the Finnish postal service
"""

from __future__ import unicode_literals, print_function, division
from bs4 import BeautifulSoup
import requests
from datetime import datetime, timedelta

lang = 'en'


def command_posti(bot, user, channel, args):
    """Parse the package status page"""
    args = args.strip()
    if not args:
        return bot.say(channel, 'Need a tracking ID as argument.')

    url = 'http://www.itella.fi/itemtracking/itella/search_by_shipment_id'

    params = {
        'ShipmentId': args,
        'lang': lang,
        'LOTUS_hae': 'Hae',
        'LOTUS_side': '1'
    }

    r = requests.post(url, params=params)
    bs = BeautifulSoup(r.content)

    try:
        status_table = bs.find('table', {'id': 'shipment-event-table'}).find_all('tr')[1]
    except:
        if lang == 'en':
            return bot.say(channel, 'Item not found.')
        return bot.say(channel, 'Lähetystä ei löytynyt.')

    try:
        event = status_table.find('div', {'class': 'shipment-event-table-header'}).text.strip()
    except:
        event = '???'

    location = '???'
    dt = timedelta(0, 0, 0)
    now = datetime.now()
    for x in status_table.find_all('div', {'class': 'shipment-event-table-row'}):
        try:
            row_label = x.find('span', {'class': 'shipment-event-table-label'}).text.strip()
            row_data = x.find('span', {'class': 'shipment-event-table-data'}).text.strip()
        except:
            continue

        if lang == 'en':
            if row_label == 'Registration:':
                dt = now - datetime.strptime(row_data, '%d.%m.%Y %H:%M:%S')
            if row_label == 'Location:':
                location = row_data
        else:
            if row_label == 'Rekisteröinti:':
                dt = now - datetime.strptime(row_data, '%d.%m.%Y klo %H:%M:%S')
            if row_label == 'Paikka:':
                location = row_data

    agestr = []
    if dt.days > 0:
        agestr.append('%dd' % dt.days)
    secs = dt.seconds
    hours, minutes, seconds = secs // 3600, secs // 60 % 60, secs % 60
    if hours > 0:
        agestr.append('%dh' % hours)
    if minutes > 0:
        agestr.append('%dm' % minutes)

    if lang == 'en':
        return bot.say(channel, '%s - %s - %s' % (' '.join(agestr) + ' ago', event, location))
    return bot.say(channel, '%s - %s - %s' % (' '.join(agestr) + ' sitten', event, location))
fbfdc979b5fbb7534a625db390b92856714dcfe1 | add basic tests for model_utils | pysat/tests/test_model_utils.py | pysat/tests/test_model_utils.py | Python | 0 | @@ -0,0 +1,1337 @@
import numpy as np
import sys

from nose.tools import assert_raises, raises
import pandas as pds

import pysat
from pysat import model_utils as mu


class TestBasics():
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument(platform='pysat',
                                         name='testing',
                                         clean_level='clean')
        self.start = pysat.datetime(2009, 1, 1)
        self.stop = pysat.datetime(2009, 1, 1)

    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst, self.start, self.stop

    @raises(ValueError)
    def test_collect_inst_model_pairs_wo_date(self):
        """Try to run without start or stop dates"""
        match = mu.collect_inst_model_pairs(inst=self.testInst)

    @raises(ValueError)
    def test_collect_inst_model_pairs_wo_inst(self):
        """Try to run without an instrument"""
        match = mu.collect_inst_model_pairs(start=self.start, stop=self.stop)

    @raises(ValueError)
    def test_collect_inst_model_pairs_wo_model(self):
        """Try to run without a model"""
        match = mu.collect_inst_model_pairs(start=self.start, stop=self.stop,
                                            inst=self.testInst)
458a4a3e5759c4ea1e5b33349288012a86d0d97d | revert syntax object instantiation change as it appears to be buggy. use an optional, dedicated value mangling methods instead. | pysnmp/entity/rfc3413/mibvar.py | pysnmp/entity/rfc3413/mibvar.py |
# MIB variable pretty printers/parsers
import types
from pyasn1.type import univ
from pysnmp.smi.error import NoSuchObjectError
# Name
def mibNameToOid(mibView, name):
if type(name[0]) == types.TupleType:
modName, symName = apply(lambda x='',y='': (x,y), name[0])
if modName: # load module if needed
mibView.mibBuilder.loadModules(modName)
else:
mibView.mibBuilder.loadModules() # load all (slow)
if symName:
oid, label, suffix = mibView.getNodeNameByDesc(symName, modName)
else:
oid, label, suffix = mibView.getFirstNodeName(modName)
suffix = name[1:]
else:
oid, label, suffix = mibView.getNodeNameByOid(name)
modName, symName, _s = mibView.getNodeLocation(oid)
mibNode, = mibView.mibBuilder.importSymbols(
modName, symName
)
if hasattr(mibNode, 'createTest'): # table column XXX
modName, symName, _s = mibView.getNodeLocation(oid[:-1])
rowNode, = mibView.mibBuilder.importSymbols(modName, symName)
return oid, apply(rowNode.getInstIdFromIndices, suffix)
else: # scalar or incomplete spec
return oid, suffix
__scalarSuffix = (univ.Integer(0),)
def oidToMibName(mibView, oid):
_oid, label, suffix = mibView.getNodeNameByOid(tuple(oid))
modName, symName, __suffix = mibView.getNodeLocation(_oid)
mibNode, = mibView.mibBuilder.importSymbols(
modName, symName
)
if hasattr(mibNode, 'createTest'): # table column
__modName, __symName, __s = mibView.getNodeLocation(_oid[:-1])
rowNode, = mibView.mibBuilder.importSymbols(__modName, __symName)
return (symName, modName), rowNode.getIndicesFromInstId(suffix)
elif not suffix: # scalar
return (symName, modName), suffix
elif suffix == (0,): # scalar
return (symName, modName), __scalarSuffix
else:
raise NoSuchObjectError(
str='No MIB info for %s (closest parent %s)' %
(oid, mibNode.name)
)
# Value
def cloneFromMibValue(mibView, modName, symName, value):
mibNode, = mibView.mibBuilder.importSymbols(
modName, symName
)
if hasattr(mibNode, 'syntax'): # scalar
return mibNode.syntax.__class__(value)
else:
return # identifier
| Python | 0 |
@@ -2264,17 +2264,13 @@
-        return mibNode.syntax.__class__(value)
+        return mibNode.syntax.clone(value)
6cd8b4c733de5a4ed39e3d3ba3d06e78b04dbb4b | read a value from a file that is in ConfigObj format - no section check | python/2.7/read_config_value.py | python/2.7/read_config_value.py | Python | 0 | @@ -0,0 +1,875 @@
#!/usr/bin/env python

from configobj import ConfigObj
import argparse
import os
import sys

def read_config(fname, skey):
    config = ConfigObj(fname, raise_errors=True)
    return config[skey]

def main():
    parser = argparse.ArgumentParser(description='read a value from a ConfigObj file', prog=os.path.basename(__file__))
    parser.add_argument('-f', '--file', help='input file', type=str)
    parser.add_argument('-k', '--key', help='key to read', type=str)
    args = parser.parse_args()

    if args.file is None:
        parser.print_usage()
        return

    if args.key is None:
        parser.print_usage()
        return

    if os.path.isfile(args.file):
        try:
            value = read_config(args.file, args.key)
            print value
        except:
            print >> sys.stderr, '[e] unable to read key:', args.key
            return
    else:
        print >> sys.stderr, '[e] unable to access file:', args.file

if __name__ == '__main__':
    main()
837d1f26ad339fbe4338ef69c947f83042daba9f | add prelim script for looking at incident data | Scripts/fire_incident.py | Scripts/fire_incident.py | Python | 0.000001 | @@ -0,0 +1,458 @@
#Weinschenk
#12-14

from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})

incident = pd.read_csv('../Data/arlington_incidents.csv', header=0)
total_incidents = len(incident['incident_class_code'])
total_fires = 0
for i in incident['incident_class_code']:
    if i == 1:
        total_fires = total_fires + 1

print 100*(total_fires/total_incidents)
ab00f54344e4aa39503a59551e87db2ed4be9c3d | Create print_rectangle.py | python3/print_rectangle.py | python3/print_rectangle.py | Python | 0.001609 | @@ -0,0 +1,168 @@
while 1:
    m, n = input().split()  # m:height, n:width
    if m == "0" and n == "0":
        break
    for i in range(int(m)):
        print("#" * int(n))
    print()
3897513dcba7b4a94fcacac8c264afadf134ebe5 | Put latency calculations onto the bridge to not have issues related to clock synchronization. | judge/bridge/judgehandler.py | judge/bridge/judgehandler.py |
import logging
import json
import threading
import time
from event_socket_server import ZlibPacketHandler
logger = logging.getLogger('judge.bridge')
class JudgeHandler(ZlibPacketHandler):
def __init__(self, server, socket):
super(JudgeHandler, self).__init__(server, socket)
self.handlers = {
'grading-begin': self.on_grading_begin,
'grading-end': self.on_grading_end,
'compile-error': self.on_compile_error,
'batch-begin': self.on_batch_begin,
'batch-end': self.on_batch_end,
'test-case-status': self.on_test_case,
'problem-not-exist': self.on_bad_problem,
'submission-terminated': self.on_submission_terminated,
'submission-acknowledged': self.on_submission_acknowledged,
'ping-response': self.on_ping_response,
'supported-problems': self.on_supported_problems,
'handshake': self.on_handshake,
}
self._to_kill = True
self._working = False
self._received = threading.Event()
self._no_response_job = None
self._problems = []
self.executors = []
self.problems = {}
self.latency = None
self.load = 1e100
self.name = None
self.batch_id = None
self.client_address = socket.getpeername()
self.server.schedule(5, self._kill_if_no_auth)
logger.info('Judge connected from: %s', self.client_address)
def _kill_if_no_auth(self):
if self._to_kill:
logger.info('Judge not authenticated: %s', self.client_address)
self.close()
def on_close(self):
self._to_kill = False
self.server.judges.remove(self)
if self.name is not None:
self._disconnected()
logger.info('Judge disconnected from: %s', self.client_address)
def _authenticate(self, id, key):
return False
def _connected(self):
pass
def _disconnected(self):
pass
def _update_ping(self):
pass
def _format_send(self, data):
return super(JudgeHandler, self)._format_send(json.dumps(data, separators=(',', ':')))
def on_handshake(self, packet):
if 'id' not in packet or 'key' not in packet or not self._authenticate(packet['id'], packet['key']):
self.close()
self._to_kill = False
self._problems = packet['problems']
self.problems = dict(self._problems)
self.executors = packet['executors']
self.name = packet['id']
self.send({'name': 'handshake-success'})
logger.info('Judge authenticated: %s', self.client_address)
self.server.judges.register(self)
self._connected()
def can_judge(self, problem, executor):
return problem in self.problems and executor in self.executors
@property
def working(self):
return bool(self._working)
def problem_data(self, problem):
return 2, 16384, False
def submit(self, id, problem, language, source):
time, memory, short = self.problem_data(problem)
self.send({
'name': 'submission-request',
'submission-id': id,
'problem-id': problem,
'language': language,
'source': source,
'time-limit': time,
'memory-limit': memory,
'short-circuit': short,
})
self._working = id
self._no_response_job = self.server.schedule(20, self._kill_if_no_response)
self._received.clear()
def _kill_if_no_response(self):
logger.error('Judge seems dead: %s: %s', self.name, self._working)
self.close()
def on_submission_acknowledged(self, packet):
if not packet.get('submission-id', None) == self._working:
logger.error('Wrong acknowledgement: %s: %s, expected: %s', self.name, packet.get('submission-id', None),
self._working)
self.close()
return
logger.info('Submission acknowledged: %d', self._working)
if self._no_response_job:
self.server.unschedule(self._no_response_job)
self._received.set()
self._no_response_job = None
def abort(self):
self.send({'name': 'terminate-submission'})
def get_current_submission(self):
return self._working or None
def ping(self):
self.send({'name': 'ping', 'when': time.time()})
def packet(self, data):
try:
try:
data = json.loads(data)
if 'name' not in data:
raise ValueError
except ValueError:
self.on_malformed(data)
else:
handler = self.handlers.get(data['name'], self.on_malformed)
handler(data)
except:
logger.exception('Error in packet handling (Judge-side)')
# You can't crash here because you aren't so sure about the judges
# not being malicious or simply malforms. THIS IS A SERVER!
def _submission_is_batch(self, id):
pass
def on_supported_problems(self, packet):
logger.info('Updated problem list')
self._problems = packet['problems']
self.problems = dict(self._problems)
if not self.working:
self.server.judges.update_problems(self)
def on_grading_begin(self, packet):
logger.info('Grading has begun on: %s', packet['submission-id'])
self.batch_id = None
def on_grading_end(self, packet):
logger.info('Grading has ended on: %s', packet['submission-id'])
self._free_self(packet)
self.batch_id = None
def on_compile_error(self, packet):
logger.info('Submission failed to compile: %s', packet['submission-id'])
self._free_self(packet)
def on_bad_problem(self, packet):
logger.error('Submission referenced invalid problem "%s": %s', packet['problem'], packet['submission-id'])
self._free_self(packet)
def on_submission_terminated(self, packet):
logger.info('Submission aborted: %s', packet['submission-id'])
self._free_self(packet)
def on_batch_begin(self, packet):
logger.info('Batch began on: %s', packet['submission-id'])
if self.batch_id is None:
self.batch_id = 0
self._submission_is_batch(packet['submission-id'])
self.batch_id += 1
def on_batch_end(self, packet):
logger.info('Batch ended on: %s', packet['submission-id'])
def on_test_case(self, packet):
logger.info('Test case completed on: %s', packet['submission-id'])
def on_malformed(self, packet):
logger.error('Malformed packet: %s', packet)
def on_ping_response(self, packet):
self.latency = packet['time']
self.load = packet['load']
self._update_ping()
def _free_self(self, packet):
self._working = False
self.server.judges.on_judge_free(self, packet['submission-id'])
| Python | 0 |
@@ -6831,16 +6831,30 @@
@@ -6854,20 +6854,20 @@
-        self.latency = packet['time']
+        self.latency = time.time() - packet['when']
989a94c81f74a17707e66f126960b6bb45e9b4d5 | Add index to cover testgroup_details (previous runs) | migrations/versions/3042d0ca43bf_index_job_project_id.py | migrations/versions/3042d0ca43bf_index_job_project_id.py | Python | 0 | @@ -0,0 +1,491 @@
"""Index Job(project_id, status, date_created) where patch_id IS NULL

Revision ID: 3042d0ca43bf
Revises: 3a3366fb7822
Create Date: 2014-01-03 15:24:39.947813

"""

# revision identifiers, used by Alembic.
revision = '3042d0ca43bf'
down_revision = '3a3366fb7822'

from alembic import op


def upgrade():
    op.execute('CREATE INDEX idx_job_previous_runs ON job (project_id, status, date_created) WHERE patch_id IS NULL')


def downgrade():
    op.drop_index('idx_job_previous_runs', 'job')
b96f39b3527cef7fd9766315fbdf7b87b6315ec8 | add watch file which generated by scratch | src/car_control_manual/scratch/watch_file.py | src/car_control_manual/scratch/watch_file.py | Python | 0 | @@ -0,0 +1,2356 @@
from __future__ import print_function

"""Watch File generated by Scratch
    1. save Scratch file *.sb2 into the same directory or specify with path
    2. change name *.sb2 to *.zip
    3. unzip *.zip file and read json data from project.json
"""


import sys, time, logging, os, zipfile
import watchdog
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler


class MyFileMonitor(watchdog.events.FileSystemEventHandler):
    def __init__(self, suffix, callback):
        super(MyFileMonitor, self).__init__()
        self.callback = callback
        if suffix.startswith('.'):
            self.suffix = suffix[1:]
        else:
            self.suffix = suffix

    def on_created(self, event):
        super(MyFileMonitor, self).on_created(event)
        n_suffix = event.src_path.split('.')[-1]
        if not event.is_directory and n_suffix == self.suffix:
            # when detected file created which we need , use callback to deal with
            self.callback(event.src_path)


class WatchFile(object):
    def __init__(self, *argv, **kargv):
        self.path = kargv['path'] if kargv.has_key('path') else '.'
        self.suffix = kargv['suffix'] if kargv.has_key('suffix') else '*'  # star represent any file
        self.observer = Observer()
        self.event_handler = MyFileMonitor(self.suffix, callback=self.get_data)

    def run(self):
        self.observer.schedule(self.event_handler, self.path, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()

    def get_data(self, filename):
        return self._unpack(filename)

    def _unpack(self, filename):
        # first rename suffix to zip file
        # may not work on linux
        new_name = filename.split('.')[1] + '.zip'
        new_name = new_name[1:] if new_name.startswith('\\') else new_name
        os.rename(filename, new_name)

        zip_file = zipfile.ZipFile(new_name, 'r')
        json_data = ""
        for name in zip_file.namelist():
            if name == "project.json":
                file = open(name, 'r')
                json_data = "".join(file.readlines())
        return json_data


if __name__ == "__main__":
    wd = WatchFile(suffix=".sb2")
    wd.run()
46b3c0c024dd0d8dbb80911d04848571b3176be7 | add yaml config reader | config.py | config.py | Python | 0 | @@ -0,0 +1,2592 @@
# -*- coding: utf-8 -*-

import os, sys, yaml

class Settings(dict):
    ''' base settings class '''
    def __init__( self, data = None ):
        super( Settings, self ).__init__()
        if data:
            self.__update( data, {} )

    def __update( self, data, did ):
        dataid = id(data)
        did[ dataid ] = self

        for k in data:
            dkid = id(data[k])
            if did.has_key(dkid):
                self[k] = did[dkid]
            elif isinstance( data[k], Settings ):
                self[k] = data[k]
            elif isinstance( data[k], dict ):
                obj = Settings()
                obj.__update( data[k], did )
                self[k] = obj
                obj = None
            else:
                self[k] = data[k]

    def __getitem__(self, item):
        return self.__getattr__(item)

    def __getattr__( self, key ):
        return self.get( key, None )

    def __setattr__( self, key, value ):
        if isinstance(value,dict):
            self[key] = Settings( value )
        else:
            self[key] = value

    def update( self, *args ):
        for obj in args:
            for k in obj:
                if isinstance(obj[k],dict):
                    self[k] = Settings( obj[k] )
                else:
                    self[k] = obj[k]
        return self

    def merge( self, *args ):
        for obj in args:
            for k in obj:
                if self.has_key(k):
                    if isinstance(self[k],list) and isinstance(obj[k],list):
                        self[k] += obj[k]
                    elif isinstance(self[k],list):
                        self[k].append( obj[k] )
                    elif isinstance(obj[k],list):
                        self[k] = [self[k]] + obj[k]
                    elif isinstance(self[k],Settings) and isinstance(obj[k],Settings):
                        self[k].merge( obj[k] )
                    elif isinstance(self[k],Settings) and isinstance(obj[k],dict):
                        self[k].merge( obj[k] )
                    else:
                        self[k] = [ self[k], obj[k] ]
                else:
                    if isinstance(obj[k],dict):
                        self[k] = Settings( obj[k] )
                    else:
                        self[k] = obj[k]
        return self


def load(config_file):
    ''' load data from yaml file '''

    with open(config_file) as fd:
        data = yaml.load(fd.read()) or dict()
    return Settings(data)

if __name__ == '__main__':
    ''' '''

    settings = load('./config.yaml')
8b9fe74976d77df32d73792f74ef4ddea1eb525f | Add Config.get() to skip KeyErrors | config.py | config.py |
#! /usr/bin/env python
import os
import warnings
import yaml
class Config(object):
config_fname = "configuration.yaml"
def __init__(self, config_fname=None):
config_fname = config_fname or self.config_fname
fo = open(config_fname, "r")
blob = fo.read()
fo.close()
self.config = yaml.load(blob)
def __getattr__(self, attrname):
if attrname == "slack_name":
warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" %
self.config_fname, DeprecationWarning)
return self.config[attrname]
# This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME
SLACK_NAME = os.getenv("SLACK_NAME")
if SLACK_NAME is None:
SLACK_NAME = Config().slack_name
| Python | 0 |
@@ -639,16 +639,168 @@
         return self.config[attrname]

+    def get(self, attrname, fallback=None):
+        try:
+            return self.config[attrname]
+        except KeyError:
+            return fallback
+

 # This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME
7dde102dd51db08f9021234fa3d8f11ab165b210 | add custom_preprocess.py | src/custom_preprocess.py | src/custom_preprocess.py | Python | 0.000001 | @@ -0,0 +1,954 @@
import unittest
import csv
from datetime import datetime, timedelta


def load_raw_data_and_split_by_dt(path, output_dir):
    base_datetime = datetime.strptime('141021', '%y%m%d')
    output_file_dict = {(base_datetime + timedelta(days=x)).strftime('%y%m%d'): open(
        output_dir + '/' + (base_datetime + timedelta(days=x)).strftime('%y%m%d') + '.csv', 'w') for x in range(0, 10)}

    with open(path, 'rb') as csvfile:
        header = csvfile.readline()
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            hour_column = row[2]
            dt = hour_column[:6]
            hour = hour_column[6:]
            output_file_dict[dt].write(",".join(row[:2] + [hour] + row[3:]) + "\n")


class TestCustomPreprocess(unittest.TestCase):
    def test_load_raw_data_and_split_by_dt(self):
        load_raw_data_and_split_by_dt('../fixtures/train.thumb', '../fixtures')


if __name__ == '__main__':
    unittest.main()
1f5134b36846cf0e5e936888a4fe51a2012e0d78 | Create alternate_disjoint_set.py (#2302) | data_structures/disjoint_set/alternate_disjoint_set.py | data_structures/disjoint_set/alternate_disjoint_set.py | Python | 0 | @@ -0,0 +1,2192 @@
"""
Implements a disjoint set using Lists and some added heuristics for efficiency
Union by Rank Heuristic and Path Compression
"""


class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using Union by rank heuristic
        Return True if successful
        Merge two disjoint sets
        >>> A = DisjointSet([1, 1, 1])
        >>> A.merge(1, 2)
        True
        >>> A.merge(0, 2)
        True
        >>> A.merge(0, 1)
        False
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the Parent of a given set
        >>> A = DisjointSet([1, 1, 1])
        >>> A.merge(1, 2)
        True
        >>> A.get_parent(0)
        0
        >>> A.get_parent(1)
        2
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
4c53ffbd9b23238b3402752f33fcabb2724921f4 | Add dunder init for lowlevel. | astrodynamics/lowlevel/__init__.py | astrodynamics/lowlevel/__init__.py | Python | 0 | @@ -0,0 +1,81 @@
# coding: utf-8
from __future__ import absolute_import, division, print_function
94f922c77ee89a5b54b99e135a5045f450badb0e | add new script to dump nice looking release notes like. Borrowed from antlr. | scripts/github_release_notes.py | scripts/github_release_notes.py | Python | 0 | @@ -0,0 +1,1840 @@
# Get github issues / PR for a release
# Exec with "python github_release_notes.py YOUR_GITHUB_API_ACCESS_TOKEN 1.19"

import sys
from collections import Counter
from github import Github

TOKEN=sys.argv[1]
MILESTONE=sys.argv[2]
g = Github(login_or_token=TOKEN)

# Then play with your Github objects:
org = g.get_organization("antlr")
repo = org.get_repo("intellij-plugin-v4")
milestone = [x for x in repo.get_milestones() if x.title==MILESTONE]
milestone = milestone[0]

issues = repo.get_issues(state="closed", milestone=milestone, sort="created", direction="desc")

# dump bugs fixed
print()
print("## Issues fixed")
for x in issues:
    labels = [l.name for l in x.labels]
    if x.pull_request is None and not ("type:improvement" in labels or "type:feature" in labels):
        print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join([l.name for l in x.labels])))

# dump improvements closed for this release (issues or pulls)
print()
print("## Improvements, features")
for x in issues:
    labels = [l.name for l in x.labels]
    if ("type:enhancement" in labels or "type:feature" in labels):
        print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join(labels)))

# dump PRs closed for this release
print()
print("## Pull requests")
for x in issues:
    labels = [l.name for l in x.labels]
    if x.pull_request is not None:
        print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join(labels)))

# dump contributors
print()
print("## Contributors")
user_counts = Counter([x.user.login for x in issues])
users = {x.user.login:x.user for x in issues}
for login,count in user_counts.most_common(10000):
    name = users[login].name
    logins = f" ({users[login].login})"
    if name is None:
        name = users[login].login
        logins = ""
    print(f"* {count:3d} items: [{name}]({users[login].html_url}){logins}")
fe08ce77958c637539b24817ffca45587fa31a7e | Implement shared API | platformio/shared.py | platformio/shared.py | Python | 0.00001 | @@ -0,0 +1,1324 @@
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=unused-import

from platformio.device.filters.base import DeviceMonitorFilterBase
from platformio.device.list import list_serial_ports
from platformio.fs import to_unix_path
from platformio.platform.base import PlatformBase
from platformio.project.config import ProjectConfig
from platformio.project.helpers import load_build_metadata
from platformio.test.result import TestCase, TestCaseSource, TestStatus
from platformio.test.runners.base import TestRunnerBase
from platformio.test.runners.doctest import DoctestTestCaseParser
from platformio.test.runners.googletest import GoogletestTestRunner
from platformio.test.runners.unity import UnityTestRunner
from platformio.util import get_systype
42297354f575e2c82346cf033202c5dfad5ddd99 | Add python class for writing out xyz files of trajectory coordinates | lib/examples/nacl_amb/utils.py | lib/examples/nacl_amb/utils.py | Python | 0.000072 | @@ -0,0 +1,1613 @@
#!/usr/bin/env python
import numpy

class TrajWriter(object):
    '''
    A class for writing out trajectory traces as an xyz file, for subsequent
    visualization.
    '''
    def __init__(self, trace, w, filename='trace.xyz'):
        self.trace = trace
        self.w = w
        self.filename = filename
        self._write()

    def _get_coords(self, iteration, seg_id):
        self.w.iteration = iteration
        coords = self.w.current.auxdata['coord'][seg_id]
        return coords

    def _write(self):
        all_coords = []
        starting_iteration = self.w.iteration
        for i, iteration in enumerate(self.trace.iteration):
            seg_id = self.trace.seg_id[i]
            coords = self._get_coords(iteration, seg_id)
            # The last timepoint of one iteration is the same as the first
            # timepoint of the last, so skip the last timepoint of each
            # iteration
            coords = coords[:-1]
            all_coords.append(coords)
        self.w.iteration = starting_iteration
        all_coords = numpy.concatenate(all_coords)
        with open(self.filename, 'w') as outfile:
            for i, frame in enumerate(all_coords):
                outfile.write("2\n")
                outfile.write("{0}\n".format(i))
                outfile.write("SOD {0:9.5f} {1:9.5f} {2:9.5f}\n".format(
                    float(frame[0,0]), float(frame[0,1]), float(frame[0,2])))
                outfile.write("CLA {0:9.5f} {1:9.5f} {2:9.5f}\n".format(
                    float(frame[1,0]), float(frame[1,1]), float(frame[1,2])))
88bd6466940d21d52c0d5235ace10b6a97d69d46 | Create emailtoHIBP.py | emailtoHIBP.py | emailtoHIBP.py | Python | 0 | @@ -0,0 +1,783 @@
#!/usr/bin/python

#EmailtoHIBP.py
#Author: Sudhanshu Chauhan - @Sudhanshu_C

#This Script will retrieve the Domain(s) at which the specified account has been compromised
#It uses the API provided by https://haveibeenpwned.com/
#Special Thanks to Troy Hunt - http://www.troyhunt.com/
#For MaltegoTransform library and Installation guidelines go to http://www.paterva.com/web6/documentation/developer-local.php

from MaltegoTransform import *
import sys
import urllib2
mt = MaltegoTransform();
mt.parseArguments(sys.argv);
email=mt.getValue();
mt = MaltegoTransform()
hibp="https://haveibeenpwned.com/api/breachedaccount/"
getrequrl=hibp+email
response = urllib2.urlopen(getrequrl)
for rep in response:
    mt.addEntity("maltego.Phrase","Pwned at " + rep)
mt.returnOutput()
084a6c11a6e74af7835502a78028f19db6423265 | simplify byte_unit conversion into a list | src/diamond/collector.py | src/diamond/collector.py |
# coding=utf-8
"""
The Collector class is a base class for all metric collectors.
"""
import os
import socket
import platform
import logging
import configobj
import traceback
from diamond.metric import Metric
# Detect the architecture of the system and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
MAX_COUNTER = (2 ** 64) - 1
else:
MAX_COUNTER = (2 ** 32) - 1
class Collector(object):
"""
The Collector class is a base class for all metric collectors.
"""
def __init__(self, config, handlers):
"""
Create a new instance of the Collector class
"""
# Initialize Logger
self.log = logging.getLogger('diamond')
# Initialize Members
self.name = self.__class__.__name__
self.handlers = handlers
self.last_values = {}
# Get Collector class
cls = self.__class__
# Initialize config
self.config = configobj.ConfigObj()
# Check if default config is defined
if self.get_default_config() is not None:
# Merge default config
self.config.merge(self.get_default_config())
# Merge default Collector config
self.config.merge(config['collectors']['default'])
# Check if Collector config section exists
if cls.__name__ in config['collectors']:
# Merge Collector config section
self.config.merge(config['collectors'][cls.__name__])
# Check for config file in config directory
configfile = os.path.join(config['server']['collectors_config_path'],
cls.__name__) + '.conf'
if os.path.exists(configfile):
# Merge Collector config file
self.config.merge(configobj.ConfigObj(configfile))
# Handle some config file changes transparently
if isinstance(self.config['byte_unit'], basestring):
units = self.config['byte_unit'].split()
self.config['byte_unit'] = []
for unit in units:
self.config['byte_unit'].append(unit)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this collector
"""
return {
'enabled': 'Enable collecting these metrics',
'byte_unit': 'Default numeric output(s)',
}
def get_default_config(self):
"""
Return the default config for the collector
"""
return {
### Defaults options for all Collectors
# Uncomment and set to hardcode a hostname for the collector path
# Keep in mind, periods are seperators in graphite
# 'hostname': 'my_custom_hostname',
# If you perfer to just use a different way of calculating the
# hostname
# Uncomment and set this to one of these values:
# fqdn_short = Default. Similar to hostname -s
# fqdn = hostname output
# fqdn_rev = hostname in reverse (com.example.www)
# uname_short = Similar to uname -n, but only the first part
# uname_rev = uname -r in reverse (com.example.www)
# 'hostname_method': 'fqdn_short',
# All collectors are disabled by default
'enabled': False,
# Path Prefix
'path_prefix': 'servers',
# Path Suffix
'path_suffix': '',
# Default splay time (seconds)
'splay': 1,
# Default Poll Interval (seconds)
'interval': 300,
# Default collector threading model
'method': 'Sequential',
# Default numeric output
'byte_unit': 'byte',
}
def get_schedule(self):
"""
Return schedule for the collector
"""
# Return a dict of tuples containing (collector function,
# collector function args, splay, interval)
return {self.__class__.__name__: (self._run,
None,
int(self.config['splay']),
int(self.config['interval']))}
def get_hostname(self):
"""
Returns a hostname as configured by the user
"""
if 'hostname' in self.config:
return self.config['hostname']
if ('hostname_method' not in self.config
or self.config['hostname_method'] == 'fqdn_short'):
return socket.getfqdn().split('.')[0]
if self.config['hostname_method'] == 'fqdn':
return socket.getfqdn().replace('.', '_')
if self.config['hostname_method'] == 'fqdn_rev':
hostname = socket.getfqdn().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
return hostname
if self.config['hostname_method'] == 'uname_short':
return os.uname()[1].split('.')[0]
if self.config['hostname_method'] == 'uname_rev':
hostname = os.uname()[1].split('.')
hostname.reverse()
hostname = '.'.join(hostname)
return hostname
if self.config['hostname_method'].lower() == 'none':
return None
raise NotImplementedError(self.config['hostname_method'])
def get_metric_path(self, name):
"""
Get metric path
"""
if 'path_prefix' in self.config:
prefix = self.config['path_prefix']
else:
prefix = 'systems'
if 'path_suffix' in self.config:
suffix = self.config['path_suffix']
else:
suffix = None
hostname = self.get_hostname()
if hostname is not None:
prefix = prefix + "." + hostname
# if there is a suffix, add after the hostname
if suffix:
prefix = '.'.join((prefix, suffix))
if 'path' in self.config:
path = self.config['path']
else:
path = self.__class__.__name__
if path == '.':
return '.'.join([prefix, name])
else:
return '.'.join([prefix, path, name])
def collect(self):
"""
Default collector method
"""
raise NotImplementedError()
def publish(self, name, value, precision=0):
"""
Publish a metric with the given name
"""
# Get metric Path
path = self.get_metric_path(name)
# Create Metric
metric = Metric(path, value, None, precision)
# Publish Metric
self.publish_metric(metric)
def publish_metric(self, metric):
"""
Publish a Metric object
"""
# Process Metric
for handler in self.handlers:
handler.process(metric)
def derivative(self, name, new, max_value=0):
"""
Calculate the derivative of the metric.
"""
# Format Metric Path
path = self.get_metric_path(name)
if path in self.last_values:
old = self.last_values[path]
# Check for rollover
if new < old:
old = old - max_value
# Get Change in X (value)
derivative_x = new - old
# Get Change in Y (time)
derivative_y = int(self.config['interval'])
result = float(derivative_x) / float(derivative_y)
else:
result = 0
# Store Old Value
self.last_values[path] = new
# Return result
return result
def _run(self):
"""
Run the collector
"""
# Log
self.log.debug("Collecting data from: %s" % self.__class__.__name__)
try:
# Collect Data
self.collect()
except Exception:
# Log Error
self.log.error(traceback.format_exc())
| Python | 0.002396 |
@@ -2031,137 +2031,34 @@
@@ -2083,19 +2083,14 @@
-            units = self.config['byte_unit'].split()
-            self.config['byte_unit'] = []
-            for unit in units:
-                self.config['byte_unit'].append(unit)
+            self.config['byte_unit'] = self.config['byte_unit'].split()
450557e0bfb902de862e5fe42868d3fbf7165600 | Add lc0983_minimum_cost_for_tickets.py from Hotel Schulz Berlin | lc0983_minimum_cost_for_tickets.py | lc0983_minimum_cost_for_tickets.py | Python | 0.000001 | @@ -0,0 +1,2024 @@
"""Leetcode 983. Minimum Cost For Tickets
Medium

URL: https://leetcode.com/problems/minimum-cost-for-tickets/

In a country popular for train travel, you have planned some train travelling
one year in advance. The days of the year that you will travel is given as
an array days. Each day is an integer from 1 to 365.

Train tickets are sold in 3 different ways:
- a 1-day pass is sold for costs[0] dollars;
- a 7-day pass is sold for costs[1] dollars;
- a 30-day pass is sold for costs[2] dollars.
The passes allow that many days of consecutive travel.
For example, if we get a 7-day pass on day 2, then we can travel for 7 days:
day 2, 3, 4, 5, 6, 7, and 8.

Return the minimum number of dollars you need to travel every day in the given
list of days.

Example 1:
Input: days = [1,4,6,7,8,20], costs = [2,7,15]
Output: 11
Explanation:
For example, here is one way to buy passes that lets you travel your travel plan:
On day 1, you bought a 1-day pass for costs[0] = $2, which covered day 1.
On day 3, you bought a 7-day pass for costs[1] = $7, which covered days 3, 4, ..., 9.
On day 20, you bought a 1-day pass for costs[0] = $2, which covered day 20.
In total you spent $11 and covered all the days of your travel.

Example 2:
Input: days = [1,2,3,4,5,6,7,8,9,10,30,31], costs = [2,7,15]
Output: 17
Explanation:
For example, here is one way to buy passes that lets you travel your travel plan:
On day 1, you bought a 30-day pass for costs[2] = $15 which covered days 1, 2, ..., 30.
On day 31, you bought a 1-day pass for costs[0] = $2 which covered day 31.
In total you spent $17 and covered all the days of your travel.

Note:
- 1 <= days.length <= 365
- 1 <= days[i] <= 365
- days is in strictly increasing order.
- costs.length == 3
- 1 <= costs[i] <= 1000
"""


class Solution(object):
    def mincostTickets(self, days, costs):
        """
        :type days: List[int]
        :type costs: List[int]
        :rtype: int
        """
        pass


def main():
    pass


if __name__ == '__main__':
    main()
a737126f8f8bcac1a00999f9e5c2a23bca9efd0d | Create hamming.py | hamming.py | hamming.py | Python | 0.000001 | @@ -0,0 +1,1337 @@
#Python Problem 2
#hamming.py
#Introduction to Bioinformatics Assignment 2
#Purpose:Calculate Hamming Distance
#Your Name: Michael Thomas
#Date: 10/10/15

#stores 3 database sequences
seqList = ["AGGATACAGCGGCTTCTGCGCGACAAATAAGAGCTCCTTGTAAAGCGCCAAAAAAAGCCTCTCGGTCTGTGGCAGCAGCGTTGGCCCGGCCCCGGGAGCGGAGAGCGAGGGGAGGCAGATTCGGAGGAAGGTCTGAAAAG",
           "AAAATACAGGGGGTTCTGCGCGACTTATGGGAGCTCCTTGTGCGGCGCCATTTTAAGCCTCACAGACTATGGCAGCAGCGTTGGCCCGGCAAAAGGAGCGGAGAGCGAGGGGAGGCGGAGACGGACGAAGGTCTGAGCAG",
           "CCCATACAGCCGCTCCTCCGCGACTTATAAGAGCTCCTTGTGCGGCGCCATTTTAAGCCTCTCGGTCTGTGGCAGCAGCGTTGGCCCGCCCAAAACAGCGGAGAGCGAGGGGAGGCGGAGACGGAGGAAGGTCTGAGCAG"]
#your query sequence
s1 = "AGGATACAGCGGCTTCTGCGCGACTTATAAGAGCTCCTTGTGCGGCGCCATTTTAAGCCTCTCGGTCTGTGGCAGCAGCGTTGGCCCGGCCCCGGGAGCGGAGAGCGAGGGGAGGCGGAGACGGAGGAAGGTCTGAGGAG"
count=[0,0,0]
#outer loop to go through seqList[]
for i in range(len(seqList)):
    #save each string to iterate trough on secondary loop
    seqi = seqList[i]
    #checks for non-matches between s1 and seqi and iterates count
    for j in range(len(s1)):
        if s1[j] != seqi[j]:
            count[i] = count[i] + 1
#Results
#hamming distance for each sequence
print "The Hamming distance dh(s1,seqList[0]) =", count[0]
print "The Hamming distance dh(s1,seqList[1]) = ", count[1]
print "The Hamming distance dh(s1,seqList[2]) = ", count[2]
290239e45b5a4eae88a5c92304b46bf74a04b616 | update Enclosure.mime docstring [skip ci] | mailthon/enclosure.py | mailthon/enclosure.py |
"""
mailthon.enclosure
~~~~~~~~~~~~~~~~~~
Implements Enclosure objects- parts that collectively
make up body of the email.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
from email.encoders import encode_base64
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from os.path import basename
from .helpers import guess
from .headers import Headers, content_disposition
class Enclosure(object):
"""
Base class for Enclosure objects to inherit from.
An enclosure is a part of the enclosure in a real
envelope- it contains part of the content to be
sent.
:param headers: Iterable of headers to include,
stored in an RFC-compliant Headers mapping
internally under the headers attribute.
"""
def __init__(self, headers=()):
self.headers = Headers(headers)
def mime_object(self):
"""
To be overriden. Returns the generated MIME
object, without applying the internal headers.
"""
raise NotImplementedError
def mime(self):
"""
Returns the finalised mime object, after
applying the internal headers.
"""
mime = self.mime_object()
self.headers.prepare(mime)
return mime
class PlainText(Enclosure):
"""
Enclosure that has a text/plain mimetype.
:param content: Unicode or bytes string.
:param encoding: Encoding used to serialize the
content or the encoding of the content.
:param headers: Optional headers.
"""
subtype = 'plain'
def __init__(self, content, encoding='utf-8', **kwargs):
Enclosure.__init__(self, **kwargs)
self.content = content
self.encoding = encoding
def mime_object(self):
return MIMEText(self.content,
self.subtype,
self.encoding)
class HTML(PlainText):
"""
Subclass of PlainText with a text/html mimetype.
"""
subtype = 'html'
class Binary(Enclosure):
"""
An Enclosure subclass for binary content. If the
content is HTML or any kind of plain-text then
the HTML or PlainText Enclosures are receommended
since they have a simpler interface.
:param content: A bytes string.
:param mimetype: Mimetype of the content.
:param encoding: Optional encoding of the content.
:param encoder: An optional encoder_ function.
:param headers: Optional headers.
.. _encoder: https://docs.python.org/2/library/email.encoders.html
"""
def __init__(self, content, mimetype, encoding=None,
encoder=encode_base64, **kwargs):
Enclosure.__init__(self, **kwargs)
self.content = content
self.mimetype = mimetype
self.encoding = encoding
self.encoder = encoder
def mime_object(self):
mime = MIMEBase(*self.mimetype.split('/'))
mime.set_payload(self.content)
if self.encoding:
del mime['Content-Type']
mime.add_header('Content-Type',
self.mimetype,
charset=self.encoding)
self.encoder(mime)
return mime
class Attachment(Binary):
"""
Binary subclass for easier file attachments.
Basically using this class has the advantage
that fetching the file contents is lazy, which
may be desired. Else use the Binary class. Also,
the Content-Disposition header is automatically
set.
:param path: Absolute/Relative path to the file.
:param headers: Optional headers.
"""
def __init__(self, path, headers=()):
self.path = path
self.mimetype, self.encoding = guess(path)
self.encoder = encode_base64
heads = dict([content_disposition('attachment', basename(path))])
heads.update(headers)
self.headers = Headers(heads)
@property
def content(self):
"""
Lazily returns the bytes contents of the file.
"""
with open(self.path, 'rb') as handle:
return handle.read()
| Python | 0 |
@@ -1221,32 +1221,77 @@
         Returns the finalised mime object, after
-        applying the internal headers.
+        applying the internal headers. Usually this
+        is not to be overriden.
         """
7b73c957ad52f9b846955b96b7cc6d0938587bb3 | Add 3rd order covariance | src/conventional/cum3est.py | src/conventional/cum3est.py | Python | 0.999372 | @@ -0,0 +1,1993 @@
#!/usr/bin/env python

from __future__ import division
import numpy as np
from scipy.linalg import hankel
import scipy.io as sio
import matplotlib.pyplot as plt

from tools import *


def cum3est(y, maxlag, nsamp, overlap, flag, k1):
    """
    UM3EST Third-order cumulants.
    Should be invoked via "CUMEST" for proper parameter checks
    Parameters:
        y: input data vector (column)
        maxlag: maximum lag to be computed
        samp_seg: samples per segment
        overlap: percentage overlap of segments
        flag : 'biased', biased estimates are computed [default]
               'unbiased', unbiased estimates are computed.
        k1: the fixed lag in c3(m,k1): see below

    Output:
        y_cum: estimated third-order cumulant,
               C3(m,k1)  -maxlag <= m <= maxlag
    """

    (n1,n2) = np.shape(y)
    N = n1*n2
    minlag = -maxlag
    overlap = np.fix(overlap/100 * nsamp)
    nrecord = np.fix((N - overlap)/(nsamp - overlap))
    nadvance = nsamp - overlap

    y_cum = np.zeros([maxlag-minlag+1,1])

    nd = np.arange(nsamp).T
    nlags = 1*maxlag + 1
    zlag = 1 + maxlag
    if flag == 'biased':
        scale = np.ones([nlags, 1])/nsamp
    else:
        lsamp = nsamp - abs(k1)
        scale = make_arr((range(lsamp-maxlag, lsamp), range(lsamp, lsamp-maxlag,-1)), axis=1)
        (m2,n2) = scale.shape
        scale = np.ones([m2,n2]) / scale

    y = y.ravel(order='F')
    for i in xrange(nrecord):
        x = y[ind]
        x = x.ravel(order='F') - mean(x)
        cx = np.conj(x)
        z = x * 0

        # create the "IV" matrix: offset for second lag
        if k1 > 0:
            z[q:nsamp-k1] = x[0:nsamp-k1, :] * cx[k1:nsamp, :]
        else:
            z[-k1:nsamp] = x[-k1:nsamp] * cx[0:nsamp+k1]

        # compute third-order cumulants
        y_cum[zlag] = y_cum[zlag] + (z.T * x)

        for k in xrange(maxlag):
            y_cum[zlag-k] = y_cum[zlag-k] + z[k:nsamp].T * x[0:nsamp-k]
            y_cum[zlag+k] = y_cum[zlag+k] + z[0:nsamp-k].T * x[k:nsamp]

        ind = ind + int(nadvance)

    y_cum = y_cum * scale/nrecord

    return y_cum
ded893c34db0c6de521e6d735d6fce30f16f3a51 | Add WSGI file. | noodleamp.wsgi | noodleamp.wsgi | Python | 0 | @@ -0,0 +1,620 @@
import os
import pwd
import sys


BASE_DIR = os.path.dirname(os.path.abspath(__file__))


def path(*paths):
    return os.path.join(BASE_DIR, *paths)


os.environ['NOODLEAMP_CONFIG'] = path('settings_local.py')
# http://code.google.com/p/modwsgi/wiki/ApplicationIssues#User_HOME_Environment_Variable
os.environ['HOME'] = pwd.getpwuid(os.getuid()).pw_dir

activate_this = path('venv/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))


BASE_DIR = os.path.join(os.path.dirname(__file__))
if BASE_DIR not in sys.path:
    sys.path.append(BASE_DIR)

from noodleamp.server import app as application
33393fcfcca30edafcf06df53550f4985033c459 | Add numba error module | numba/error.py | numba/error.py | Python | 0 | @@ -0,0 +1,73 @@
class NumbaError(Exception):
    "Some error happened during compilation"
5784158855eba090c24bb93ece991fba3b1e1a67 | Add never_cache to views | radmin/views.py | radmin/views.py |
from django.http import HttpResponse
from django.contrib.admin.views.decorators import staff_member_required
from django.utils import simplejson as json
from radmin.console import REGISTERED_NAMED_ITEMS, REGISTERED_TO_ALL
from radmin.utils import *
@staff_member_required
def entry_point(request):
""" This is the entry point for radmin console."""
if request.is_ajax():
# grab get params
location = request.GET.get('location', None) # where we are in the admin site
param1 = request.GET.get('param1', None) # usually specifics about location, app_name or model_name etc
param2 = request.GET.get('param2', None) # and additional field, can carry model id
controls = []
# first lets do the globally registered controls
for key,value in REGISTERED_TO_ALL.items():
controls.append({'label':value['label'],'target':key})
# check for admin_index stuff
if location in REGISTERED_NAMED_ITEMS:
value = REGISTERED_NAMED_ITEMS[location]
controls.append({'label':value['label'],'target':location, 'data':param2})
if param1 in REGISTERED_NAMED_ITEMS:
value = REGISTERED_NAMED_ITEMS[param1]
controls.append({'label':value['label'],'target':param1, 'data':param2})
return HttpResponse(json.dumps(controls), mimetype="application/json")
@staff_member_required
def runner(request):
if request.is_ajax():
target = request.GET.get('target')
data = request.GET.get('data', None)
# now we have to do a look up and see if the target exists in commands dict
if target in REGISTERED_NAMED_ITEMS:
console_item = REGISTERED_NAMED_ITEMS[target]
mod = radmin_import(console_item['callback'])
if mod:
try:
if data:
output = mod(data)
else:
output = mod()
result = {'result':'success', 'output':output, 'display_result':console_item['display_result']}
except Exception as e:
result = {'result':'error', 'output':e, 'display_result':console_item['display_result']}
return HttpResponse(json.dumps(result), mimetype="application/json")
else:
result = {'result':'error', 'output':'No Module Found', 'display_result':console_item['display_result']}
return HttpResponse(json.dumps(result), mimetype="application/json")
elif target in REGISTERED_TO_ALL:
console_item = REGISTERED_TO_ALL[target]
mod = radmin_import(console_item['callback'])
if mod:
try:
result = {'result':'success', 'output':mod(),'display_result':console_item['display_result']}
except Exception as e:
result = {'result':'error', 'output':e, 'display_result':console_item['display_result']}
return HttpResponse(json.dumps(result), mimetype="application/json")
else:
result = {'result':'error', 'output':'No Module Found', 'display_result':console_item['display_result']}
return HttpResponse(json.dumps(result), mimetype="application/json")
return HttpResponse(json.dumps({'result':'not_found_error'}), mimetype="application/json")
def sample():
return "Hi there!"
| Python | 0 | @@ -146,16 +146,70 @@
as json%0A
+from django.views.decorators.cache import never_cache%0A
from rad
@@ -297,16 +297,29 @@
port *%0A%0A
+@never_cache%0A
@staff_m
@@ -1444,16 +1444,29 @@
json%22)%0A%0A
+@never_cache%0A
@staff_m
|
c1e801798d3b7e8d4c9ba8a11f79ffa92bf182f5 | Add test cases for the logger | test/test_logger.py | test/test_logger.py | Python | 0.000002 | @@ -0,0 +1,1038 @@
+# encoding: utf-8%0A%0A%22%22%22%0A.. codeauthor:: Tsuyoshi Hombashi %[email protected]%3E%0A%22%22%22%0A%0Afrom __future__ import print_function%0Afrom __future__ import unicode_literals%0A%0Aimport logbook%0Afrom pingparsing import (%0A set_logger,%0A set_log_level,%0A)%0Aimport pytest%0A%0A%0Aclass Test_set_logger(object):%0A%0A @pytest.mark.parametrize(%5B%22value%22%5D, %5B%0A %5BTrue%5D,%0A %5BFalse%5D,%0A %5D)%0A def test_smoke(self, value):%0A set_logger(value)%0A%0A%0Aclass Test_set_log_level(object):%0A%0A @pytest.mark.parametrize(%5B%22value%22%5D, %5B%0A %5Blogbook.CRITICAL%5D,%0A %5Blogbook.ERROR%5D,%0A %5Blogbook.WARNING%5D,%0A %5Blogbook.NOTICE%5D,%0A %5Blogbook.INFO%5D,%0A %5Blogbook.DEBUG%5D,%0A %5Blogbook.TRACE%5D,%0A %5Blogbook.NOTSET%5D,%0A %5D)%0A def test_smoke(self, value):%0A set_log_level(value)%0A%0A @pytest.mark.parametrize(%5B%22value%22, %22expected%22%5D, %5B%0A %5BNone, LookupError%5D,%0A %5B%22unexpected%22, LookupError%5D,%0A %5D)%0A def test_exception(self, value, expected):%0A with pytest.raises(expected):%0A set_log_level(value)%0A
|
|
59aa8fbba193e7b9e1e63a146f8d59eefda48dea | Version bump. | mezzanine/__init__.py | mezzanine/__init__.py |
__version__ = "0.8.3"
| Python | 0 | @@ -17,7 +17,7 @@
0.8.
-3
+4
%22%0A
|
c559cdd34a2dc8f3129c1fed5235291f22329368 | install crontab | install_crontab.py | install_crontab.py | Python | 0.000001 | @@ -0,0 +1,665 @@
+#!/usr/bin/python2%0A%0Afrom crontab import CronTab%0Aimport sys%0A%0ACRONTAB_TAG = %22ubuntu-cleanup-annoifier%22%0A%0Adef install_cron():%0A my_cron = CronTab(user=True)%0A %0A# job = my_cron.new(command=executable_path(args))%0A job = my_cron.new(command=%22dummy123%22)%0A job.minute.on(0)%0A job.hour.on(0)%0A job.enable()%0A job.set_comment(CRONTAB_TAG)%0A %0A my_cron.write_to_user( user=True )%0A%0Adef uninstall_cron():%0A my_cron = CronTab(user=True)%0A my_cron.remove_all(comment=CRONTAB_TAG)%0A my_cron.write_to_user( user=True )%0A%0Aif __name__ == %22__main__%22:%0A if sys.argv%5B1%5D == %22i%22:%0A install_cron()%0A elif sys.argv%5B1%5D == %22u%22:%0A uninstall_cron()%0A%0A%0A%0A
|
|
ca8a7320cbec1d4fa71ec5a7f909908b8765f573 | Allow underscores for release tags (#4976) | test_utils/scripts/circleci/get_tagged_package.py | test_utils/scripts/circleci/get_tagged_package.py | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to determine package from tag.
Get the current package directory corresponding to the Circle Tag.
"""
from __future__ import print_function
import os
import re
import sys
TAG_RE = re.compile(r"""
^
(?P<pkg>
(([a-z]+)-)*) # pkg-name-with-hyphens- (empty allowed)
([0-9]+)\.([0-9]+)\.([0-9]+) # Version x.y.z (x, y, z all ints)
$
""", re.VERBOSE)
TAG_ENV = 'CIRCLE_TAG'
ERROR_MSG = '%s env. var. not set' % (TAG_ENV,)
BAD_TAG_MSG = 'Invalid tag name: %s. Expected pkg-name-x.y.z'
CIRCLE_CI_SCRIPTS_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.realpath(
os.path.join(CIRCLE_CI_SCRIPTS_DIR, '..', '..', '..'))
def main():
"""Get the current package directory.
Prints the package directory out so callers can consume it.
"""
if TAG_ENV not in os.environ:
print(ERROR_MSG, file=sys.stderr)
sys.exit(1)
tag_name = os.environ[TAG_ENV]
match = TAG_RE.match(tag_name)
if match is None:
print(BAD_TAG_MSG % (tag_name,), file=sys.stderr)
sys.exit(1)
pkg_name = match.group('pkg')
if pkg_name is None:
print(ROOT_DIR)
else:
pkg_dir = pkg_name.rstrip('-').replace('-', '_')
print(os.path.join(ROOT_DIR, pkg_dir))
if __name__ == '__main__':
main()
| Python | 0 | @@ -818,22 +818,15 @@
z%5D+)
--)*)
+%5B_-%5D)*)
#
@@ -847,16 +847,30 @@
hyphens-
+or-underscores
(empty
|
74e24debf55b003f1d56d35f4b040d91a0698e0a | Add example for cluster centroids method | example/under-sampling/plot_cluster_centroids.py | example/under-sampling/plot_cluster_centroids.py | Python | 0.000001 | @@ -0,0 +1,1905 @@
+%22%22%22%0A=================%0ACluster centroids%0A=================%0A%0AAn illustration of the cluster centroids method.%0A%0A%22%22%22%0A%0Aprint(__doc__)%0A%0Aimport matplotlib.pyplot as plt%0Aimport seaborn as sns%0Asns.set()%0A%0A# Define some color for the plotting%0Aalmost_black = '#262626'%0Apalette = sns.color_palette()%0A%0Afrom sklearn.datasets import make_classification%0Afrom sklearn.decomposition import PCA%0A%0Afrom unbalanced_dataset.under_sampling import ClusterCentroids%0A%0A# Generate the dataset%0AX, y = make_classification(n_classes=2, class_sep=2, weights=%5B0.1, 0.9%5D,%0A n_informative=3, n_redundant=1, flip_y=0,%0A n_features=20, n_clusters_per_class=1,%0A n_samples=5000, random_state=10)%0A%0A# Instanciate a PCA object for the sake of easy visualisation%0Apca = PCA(n_components=2)%0A# Fit and transform x to visualise inside a 2D feature space%0AX_vis = pca.fit_transform(X)%0A%0A# Apply the random under-sampling%0Acc = ClusterCentroids()%0AX_resampled, y_resampled = cc.fit_transform(X, y)%0AX_res_vis = pca.transform(X_resampled)%0A%0A# Two subplots, unpack the axes array immediately%0Af, (ax1, ax2) = plt.subplots(1, 2)%0A%0Aax1.scatter(X_vis%5By == 0, 0%5D, X_vis%5By == 0, 1%5D, label=%22Class #0%22, alpha=0.5,%0A edgecolor=almost_black, facecolor=palette%5B0%5D, linewidth=0.15)%0Aax1.scatter(X_vis%5By == 1, 0%5D, X_vis%5By == 1, 1%5D, label=%22Class #1%22, alpha=0.5,%0A edgecolor=almost_black, facecolor=palette%5B2%5D, linewidth=0.15)%0Aax1.set_title('Original set')%0A%0Aax2.scatter(X_res_vis%5By_resampled == 0, 0%5D, X_res_vis%5By_resampled == 0, 1%5D,%0A label=%22Class #0%22, alpha=.5, edgecolor=almost_black,%0A facecolor=palette%5B0%5D, linewidth=0.15)%0Aax2.scatter(X_res_vis%5By_resampled == 1, 0%5D, X_res_vis%5By_resampled == 1, 1%5D,%0A label=%22Class #1%22, alpha=.5, edgecolor=almost_black,%0A facecolor=palette%5B2%5D, linewidth=0.15)%0Aax2.set_title('Cluster centroids')%0A%0Aplt.show()%0A
|
|
25cd25dab4de9e6963ffa622474b3f0bdcdc1e48 | Create preprocessor.py | interpreter/preprocessor.py | interpreter/preprocessor.py | Python | 0.000006 | @@ -0,0 +1 @@
+%0A
|
|
0c1ccd5180601d3ed3f5dc98b3330d40c014f7c0 | Add simul. (#3300) | var/spack/repos/builtin/packages/simul/package.py | var/spack/repos/builtin/packages/simul/package.py | Python | 0.000027 | @@ -0,0 +1,1861 @@
+##############################################################################%0A# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the LICENSE file for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass Simul(Package):%0A %22%22%22simul is an MPI coordinated test of parallel %0A filesystem system calls and library functions. %22%22%22%0A%0A homepage = %22https://github.com/LLNL/simul%22%0A url = %22https://github.com/LLNL/simul/archive/1.16.tar.gz%22%0A%0A version('1.16', 'd616c1046a170c1e1b7956c402d23a95')%0A version('1.15', 'a5744673c094a87c05c6f0799d1f496f')%0A version('1.14', 'f8c14f0bac15741e2af354e3f9a0e30f')%0A version('1.13', '8a80a62d569557715d6c9c326e39a8ef')%0A%0A depends_on('mpi')%0A%0A def install(self, spec, prefix):%0A make('simul')%0A mkdirp(prefix.bin)%0A install('simul', prefix.bin)%0A
|
|
6cb953dc01a77bc549c53cc325a741d1952ed6b6 | Bump FIDO version to 1.3.12 | fpr/migrations/0025_update_fido_1312.py | fpr/migrations/0025_update_fido_1312.py | Python | 0 | @@ -0,0 +1,1953 @@
+# -*- coding: utf-8 -*-%0A%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Adef data_migration_up(apps, schema_editor):%0A %22%22%22%0A Update identification tool FIDO to 1.3.12, correcting a%0A character-spacing issue bug identified in PRONOM94 (again)%0A %22%22%22%0A%0A idtool = apps.get_model('fpr', 'IDTool')%0A idcommand = apps.get_model('fpr', 'IDCommand')%0A%0A # Update Fido tool%0A idtool.objects%5C%0A .filter(uuid='c33c9d4d-121f-4db1-aa31-3d248c705e44')%5C%0A .update(version='1.3.12', slug='fido-1312')%0A%0A # Create new command using the new version of Fido%0A old_fido_command = idcommand.objects%5C%0A .get(uuid='e586f750-6230-42d7-8d12-1e24ca2aa658')%0A%0A idcommand.objects.create(%0A uuid='213d1589-c255-474f-81ac-f0a618181e40',%0A description=u'Identify using Fido 1.3.12',%0A config=old_fido_command.config,%0A script=old_fido_command.script,%0A script_type=old_fido_command.script_type,%0A tool=idtool.objects.get(uuid='c33c9d4d-121f-4db1-aa31-3d248c705e44'),%0A enabled=True%0A )%0A old_fido_command.enabled = False%0A old_fido_command.save()%0A%0A%0Adef data_migration_down(apps, schema_editor):%0A %22%22%22%0A Revert FIDO to previous version%0A %22%22%22%0A%0A idtool = apps.get_model('fpr', 'IDTool')%0A idcommand = apps.get_model('fpr', 'IDCommand')%0A%0A # Remove new ID Commands%0A idcommand.objects%5C%0A .filter(uuid='213d1589-c255-474f-81ac-f0a618181e40').delete()%0A%0A # Revert Fido tool%0A idtool.objects%5C%0A .filter(uuid='c33c9d4d-121f-4db1-aa31-3d248c705e44')%5C%0A .update(version='1.3.10', slug='fido-1310')%0A%0A # Restore Fido command%0A idcommand.objects%5C%0A .filter(uuid='e586f750-6230-42d7-8d12-1e24ca2aa658')%5C%0A .update(enabled=True)%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('fpr', '0024_update_fido'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(data_migration_up, data_migration_down),%0A %5D%0A
|
|
8fa9a54c9a5ee683fc9e9d361a4eb7affe5e83ed | Add functions to paint game of life to screen | game_of_life.py | game_of_life.py | Python | 0 | @@ -0,0 +1,757 @@
+#!/usr/bin/env python%0Afrom curses import wrapper%0Afrom time import sleep%0A%0Adef enumerate_lines(matrix):%0A on = '*'%0A off = ' '%0A for i, row in enumerate(matrix):%0A yield i, ''.join(on if v else off for v in row)%0A%0Adef paint(stdscr, matrix):%0A stdscr.clear()%0A for i, line in enumerate_lines(matrix):%0A stdscr.addstr(i, 0, line)%0A stdscr.refresh()%0A%0A%0Asize = 50%0Am1 = %5B%0A %5Bi == j or i == size - j for j in xrange(0, size + 1)%5D%0A for i in xrange(0, size + 1)%0A%5D%0Am2 = %5B%0A %5Bi == size / 2 or j == size / 2 for j in xrange(0, size + 1)%5D%0A for i in xrange(0, size + 1)%0A%5D%0A%0Adef main(stdscr):%0A for i in xrange(0,100):%0A matrix = m1 if i %25 2 else m2%0A paint(stdscr, matrix)%0A sleep(0.5)%0A stdscr.getkey()%0A%0Awrapper(main)%0A
|
|
ff8cee4f98dde0533751dfd15308c5fdfdec3982 | test file for rapid iteration | tests/quick_test.py | tests/quick_test.py | Python | 0 | @@ -0,0 +1,1880 @@
+%22%22%22%0Anosetests -sv --nologcapture tests/quick_test.py%0A%22%22%22%0A%0Aimport datetime%0Aimport os%0Aimport random%0Aimport sys%0Asys.path = %5Bos.path.abspath(os.path.dirname(__file__))%5D + sys.path%0Aos.environ%5B'is_test_suite'%5D = 'True'%0Aos.environ%5B'KERAS_BACKEND'%5D = 'theano'%0A%0Afrom auto_ml import Predictor%0Afrom auto_ml.utils_models import load_ml_model%0A%0Afrom nose.tools import assert_equal, assert_not_equal, with_setup%0Afrom sklearn.metrics import accuracy_score%0A%0Aimport dill%0Aimport numpy as np%0Aimport utils_testing as utils%0A%0A%0A# def regression_test():%0A# # a random seed of 42 has ExtraTreesRegressor getting the best CV score, and that model doesn't generalize as well as GradientBoostingRegressor.%0A# np.random.seed(0)%0A%0A# df_boston_train, df_boston_test = utils.get_boston_regression_dataset()%0A%0A# column_descriptions = %7B%0A# 'MEDV': 'output'%0A# , 'CHAS': 'categorical'%0A# %7D%0A%0A# ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)%0A%0A# ml_predictor.train(df_boston_train, model_names=%5B'DeepLearningRegressor'%5D)%0A%0A# test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)%0A%0A# print('test_score')%0A# print(test_score)%0A%0A# assert -3.35 %3C test_score %3C -2.8%0A%0A%0Adef classification_test(model_name=None):%0A np.random.seed(0)%0A%0A df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()%0A%0A column_descriptions = %7B%0A 'survived': 'output'%0A , 'embarked': 'categorical'%0A , 'pclass': 'categorical'%0A %7D%0A%0A ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)%0A%0A ml_predictor.train(df_titanic_train, model_names=%5B'DeepLearningClassifier'%5D)%0A%0A test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)%0A%0A print('test_score')%0A print(test_score)%0A%0A assert -0.215 %3C test_score %3C -0.17%0A%0A
|
|
c98a744f5f436ae2c6266a7bb5d32173cfd0e4a9 | Add a script that scrapes the Socrata catalog, just in case we need that in another format | scripts/socrata_scraper.py | scripts/socrata_scraper.py | Python | 0 | @@ -0,0 +1,2703 @@
+#!/usr/bin/python3%0A%0A%22%22%22%0AThis is a basic script that downloads the catalog data from the smcgov.org%0Awebsite and pulls out information about all the datasets.%0A%0AThis is in python3%0A%0AThere is an optional download_all argument that will allow you to download%0Aall of the datasets individually and in their entirety. I have included this%0Aas a demonstration, but it should not be commonly used because it takes a%0Awhile and beats up on the smcgov data portal, which you should avoid.%0A%0A%22%22%22%0A%0Aimport sys%0Aimport json%0Aimport argparse%0Aimport collections%0Aimport urllib.request%0A%0AURL = %22https://data.smcgov.org/api/catalog?limit=999999999&only=datasets%22%0A%0Adef main(args):%0A category_data = collections.defaultdict(list)%0A domain_data = collections.defaultdict(list)%0A data_downloads = %5B%5D%0A datasets_with_location = %5B%5D%0A with urllib.request.urlopen(URL) as raw_data:%0A data = json.loads(raw_data.read().decode('utf-8'))%0A for result in data%5B'results'%5D:%0A categories = result%5B'classification'%5D%5B'categories'%5D%0A domain = result%5B'classification'%5D%5B'domain_category'%5D%0A if categories is None or categories == %5B%5D:%0A categories = %5B'NULL'%5D%0A permalink = result%5B'permalink'%5D%0A data_downloads.append('%7B%7D.json'.format(permalink))%0A domain_data%5Bdomain%5D.append(permalink)%0A for category in categories:%0A category_data%5Bcategory%5D.append(permalink)%0A%0A if args.download_all:%0A for download_url in data_downloads:%0A with urllib.request.urlopen(download_url) as dataset_file:%0A print('Downloading %7B%7D'.format(download_url))%0A dataset = json.loads(dataset_file.read().decode('utf-8'))%0A if len(dataset) %3C 1:%0A continue%0A if 'location_1' in dataset%5B0%5D.keys():%0A # Our best guess on which datasets have location info.%0A datasets_with_location.append(download_url)%0A%0A if args.download_all:%0A print('Datasets with location_1 key')%0A print(datasets_with_location)%0A print('----------------------------------------------------')%0A print('Number of Datasets by Category')%0A for key, values in category_data.items():%0A print(key, len(values))%0A print('----------------------------------------------------')%0A print('Number of Datasets by Domain')%0A for key, values in domain_data.items():%0A print(key, len(values))%0A%0Aif __name__=='__main__':%0A import argparse%0A parser = argparse.ArgumentParser()%0A parser.add_argument('--download_all', help='Download all datasets',%0A action='store_true')%0A args = parser.parse_args()%0A main(args=args)%0A
|
|
0c11d2740e561586bb4f9d2b67bda2ccc87e146e | Add new command to notify New Relic of deployment | ixdjango/management/commands/newrelic_notify_deploy.py | ixdjango/management/commands/newrelic_notify_deploy.py | Python | 0 | @@ -0,0 +1,1665 @@
+%22%22%22%0AManagement command to enable New Relic notification of deployments%0A%0A.. moduleauthor:: Infoxchange Development Team %[email protected]%3E%0A%0A%22%22%22%0A%0Aimport pwd%0Aimport os%0Afrom subprocess import Popen, PIPE%0Afrom urllib import urlencode%0A%0Afrom httplib2 import Http%0A%0Afrom django.conf import settings%0Afrom django.core.management.base import NoArgsCommand%0A%0Aimport newrelic.agent%0A%0A%0Aclass Command(NoArgsCommand):%0A %22%22%22%0A Loads the fixtures contained inside IX_FIXTURES setting variable.%0A%0A See http://redmine.office.infoxchange.net.au/issues/8376%0A %22%22%22%0A%0A URL = 'https://rpm.newrelic.com/deployments.xml'%0A%0A def handle_noargs(self, **options):%0A newrelic.agent.initialize(%0A settings.NEW_RELIC_CONFIG,%0A settings.NEW_RELIC_ENV%0A )%0A%0A config = newrelic.agent.global_settings()%0A%0A if not config.monitor_mode:%0A return%0A%0A # get the current git version%0A git = Popen(('git', 'describe'), stdout=PIPE)%0A ver, _ = git.communicate()%0A ver = ver.strip()%0A%0A # get the current user%0A user = pwd.getpwuid(os.getuid())%0A%0A headers = %7B%0A 'x-api-key': config.license_key%0A %7D%0A post = %7B%0A 'deployment%5Bapp_name%5D': config.app_name,%0A 'deployment%5Brevision%5D': ver,%0A 'deployment%5Buser%5D': '%25s (%25s)' %25 (user.pw_gecos, user.pw_name),%0A %7D%0A%0A print %22Informing New Relic...%22,%0A%0A # post this data%0A http = Http()%0A response, _ = http.request(self.URL, 'POST',%0A headers=headers,%0A body=urlencode(post))%0A%0A print response%5B'status'%5D%0A
|
|
c0ea919305bcedf080a2213f4c549c68fa4efa2d | test tools | tests/test_tools.py | tests/test_tools.py | Python | 0.000002 | @@ -0,0 +1,974 @@
+import unittest2 as unittest%0Afrom fabric.api import run%0Aimport tempfile%0A%0Afrom mixins import WebServerMixin%0Afrom parcel.tools import dl, rpull, rpush%0A%0Adef tempname():%0A return tempfile.mkstemp()%5B1%5D%0A %0Aimport zlib, os%0A%0Adef crc32(filename):%0A CHUNKSIZE = 8192%0A checksum = 0%0A with open(filename, 'rb') as fh:%0A bytes = fh.read(CHUNKSIZE)%0A while bytes:%0A checksum = zlib.crc32(bytes, checksum)%0A bytes = fh.read(CHUNKSIZE)%0A return checksum%0A%0A%0Aclass ToolsTestSuite(unittest.TestCase, WebServerMixin):%0A %22%22%22Tools test cases.%22%22%22%0A %0A def test_dl(self):%0A self.startWebServer()%0A %0A filename = tempname()%0A %0A dl(%22http://localhost:%25s/tip.tar.gz%22%25self.port,filename)%0A %0A # there should be no differences between the files%0A self.assertEquals(crc32(filename),crc32(os.path.join(self.webroot,'tip.tar.gz')))%0A %0A # shutdown webserver%0A self.stopWebServer()%0A %0A
|
|
9de5728e5fdb0f7dc606681df685eb084477d8d0 | Add exercise | multiplyTwoNumbers.py | multiplyTwoNumbers.py | Python | 0.000196 | @@ -0,0 +1,170 @@
+#!/usr/bin/env python%0A%0Adef main():%0A a = input(%22Enter a number: %22)%0A b = input(%22Enter another number: %22)%0A print %22The product of %25d and %25d is %25d%22 %25 (a, b, a * b)%0A%0Amain()%0A
|
|
f883edc209928494c45693c5ecfd279bfbb09c97 | Add partfrac1 | timing/partfrac1.py | timing/partfrac1.py | Python | 0.999998 | @@ -0,0 +1,1218 @@
+import time%0Afrom lcapy import *%0A%0Afuncs = %5B1 / s, 1 / s**2, 1 / (s + 3), 1 / (s + 3)**2, (s + 3) / (s + 4),%0A 1 / (s + 3)**2 / (s + 4), 1 / (s + 3)**3 / (s + 4),%0A 1 / (s + 3) / (s + 4) / (s + 5), (s + 6) / (s + 3) / (s + 4) / (s + 5),%0A 1 / (s + 3)**2 / (s + 4)**2, 1 / (s + 3)**3 / (s + 4)**2,%0A s / (s + 3)**2 / (s + 4), s / (s + 3)**3 / (s + 4)%5D%0A%0ANtrials = 10%0Amethods = ('ec', 'sub')%0Atimes = %7B%7D%0A%0Afor func in funcs:%0A ans1 = func.partfrac(method='ec')%0A ans2 = func.partfrac(method='sub')%0A if ans1 != func:%0A print('Wrong answer for eq: ', func)%0A if ans2 != func:%0A print('Wrong answer for sub: ', func)%0A%0A%0Afor method in methods:%0A times%5Bmethod%5D = %5B%5D%0A%0A for func in funcs:%0A start = time.perf_counter()%0A for i in range(Ntrials):%0A func.partfrac(method=method)%0A stop = time.perf_counter()%0A times%5Bmethod%5D.append((stop - start) / Ntrials)%0A%0Aimport numpy as np%0Afrom matplotlib.pyplot import subplots, style, savefig, show%0A%0Aindex = np.arange(len(funcs))%0A%0Afig, axes = subplots(1)%0Aaxes.bar(index, times%5B'ec'%5D, 0.35, label='ec')%0Aaxes.bar(index+0.35, times%5B'sub'%5D, 0.35, label='subs')%0Aaxes.legend()%0Aaxes.set_ylabel('Time (s)')%0A%0Ashow()%0A
|
|
e71c232660a7480c2b56f6e76e83fad4c7e9da8a | Add ctm_test.py test for testing CRTC's CTM color matrix property. | py/tests/ctm_test.py | py/tests/ctm_test.py | Python | 0 | @@ -0,0 +1,1706 @@
+#!/usr/bin/python3%0A%0Aimport sys%0Aimport pykms%0A%0Adef ctm_to_blob(ctm, card):%0A len=9%0A arr = bytearray(len*8)%0A view = memoryview(arr).cast(%22I%22)%0A%0A for x in range(len):%0A i, d = divmod(ctm%5Bx%5D, 1)%0A if i %3C 0:%0A i = -i%0A sign = 1 %3C%3C 31%0A else:%0A sign = 0%0A view%5Bx * 2 + 0%5D = int(d * ((2 ** 32) - 1))%0A view%5Bx * 2 + 1%5D = int(i) %7C sign%0A #print(%22%25f = %2508x.%2508x%22 %25 (ctm%5Bx%5D, view%5Bx * 2 + 1%5D, view%5Bx * 2 + 0%5D))%0A%0A return pykms.Blob(card, arr);%0A%0A%0Aif len(sys.argv) %3E 1:%0A conn_name = sys.argv%5B1%5D%0Aelse:%0A conn_name = %22%22%0A%0Acard = pykms.Card()%0Ares = pykms.ResourceManager(card)%0Aconn = res.reserve_connector(conn_name)%0Acrtc = res.reserve_crtc(conn)%0Amode = conn.get_default_mode()%0A%0Afb = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, %22XR24%22);%0Apykms.draw_test_pattern(fb);%0A%0Acrtc.set_mode(conn, fb, mode)%0A%0Ainput(%22press enter to set normal ctm%5Cn%22)%0A%0Actm = %5B 1.0,%090.0,%090.0,%0A 0.0,%091.0,%090.0,%0A 0.0,%090.0,%091.0 %5D%0A%0Actmb = ctm_to_blob(ctm, card)%0A%0Acrtc.set_prop(%22CTM%22, ctmb.id)%0A%0Ainput(%22press enter to set new ctm%5Cn%22)%0A%0Actm = %5B 0.0,%091.0,%090.0,%0A 0.0,%090.0,%091.0,%0A 1.0,%090.0,%090.0 %5D%0A%0Actmb = ctm_to_blob(ctm, card)%0A%0Acrtc.set_prop(%22CTM%22, ctmb.id)%0A%0Aprint(%22r-%3Eb g-%3Er b-%3Eg ctm active%5Cn%22)%0A%0Ainput(%22press enter to set new ctm%5Cn%22)%0A%0Actm = %5B 0.0,%090.0,%091.0,%0A 1.0,%090.0,%090.0,%0A 0.0,%091.0,%090.0 %5D%0A%0Actmb = ctm_to_blob(ctm, card)%0A%0Acrtc.set_prop(%22CTM%22, ctmb.id)%0Ainput(%22r-%3Eg g-%3Eb b-%3Er ctm active%5Cn%22)%0A%0Ainput(%22press enter to turn off the crtc%5Cn%22)%0A%0Acrtc.disable_mode()%0A%0Ainput(%22press enter to enable crtc again%5Cn%22)%0A%0Acrtc.set_mode(conn, fb, mode)%0A%0Ainput(%22press enter to remove ctm%5Cn%22)%0A%0Acrtc.set_prop(%22CTM%22, 0)%0A%0Ainput(%22press enter to exit%5Cn%22)%0A
|
|
04477b11bbe7efa1720829691b7d1c3fe2a7a492 | Add __init__ | h2/__init__.py | h2/__init__.py | Python | 0.000917 | @@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0Ah2%0A~~%0A%0AA HTTP/2 implementation.%0A%22%22%22%0A__version__ = '0.1.0'%0A
|
|
2e2ad49c7ada145b5a4a81bd8941cf5e72d2d81b | Test case for wordaxe bug | rst2pdf/tests/input/test_180.py | rst2pdf/tests/input/test_180.py | Python | 0 | @@ -0,0 +1,661 @@
+# -*- coding: utf-8 -*-%0Afrom reportlab.platypus import SimpleDocTemplate%0Afrom reportlab.platypus.paragraph import Paragraph%0Afrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle%0Afrom reportlab.lib.colors import Color%0Afrom reportlab.platypus.flowables import _listWrapOn, _FUZZ%0Afrom wordaxe.rl.NewParagraph import Paragraph%0Afrom wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet%0A%0A%0Adef go():%0A styles = getSampleStyleSheet()%0A style=styles%5B'Normal'%5D%0A %0A p1 = Paragraph('This is a paragraph', style )%0A print p1.wrap(500,701)%0A print p1._cache%5B'avail'%5D%0A print len(p1.split(500,701))%0A print len(p1.split(500,700))%0A%0Ago()%0A
|
|
698eee3db238189ba066670c4fe4a1193e6a942a | add flask-login | app/user/loginmanager.py | app/user/loginmanager.py | Python | 0.000001 | @@ -0,0 +1,223 @@
+from flask.ext.login import LoginManager%0Afrom models import User%0A%0Alogin_manager = LoginManager()%0A%0A@login_manager.user_loader%0Adef user_loader(user_id):%0A return User.query.get(user_id)%0A%0Alogin_manager.login_view = '.login'%0A
|
|
d5d8e16b5ccbbb65398ce015f020db3839fac409 | add test_rotate.py | tests/transforms_tests/image_tests/test_rotate.py | tests/transforms_tests/image_tests/test_rotate.py | Python | 0.000003 | @@ -0,0 +1,591 @@
+import random%0Aimport unittest%0A%0Aimport numpy as np%0A%0Afrom chainer import testing%0Afrom chainercv.transforms import flip%0Afrom chainercv.transforms import rotate%0A%0A%0Aclass TestRotate(unittest.TestCase):%0A%0A def test_rotate(self):%0A img = np.random.uniform(size=(3, 32, 24))%0A angle = random.uniform(0, 180)%0A%0A out = rotate(img, angle)%0A expected = flip(img, x_flip=True)%0A expected = rotate(expected, -1 * angle)%0A expected = flip(expected, x_flip=True)%0A%0A np.testing.assert_almost_equal(out, expected, decimal=6)%0A%0A%0Atesting.run_module(__name__, __file__)%0A
|
|
de74c933b74d9066984fe040edf026b7d9f87711 | Split problem statement 2 | 69_split_problem_statement_2.py | 69_split_problem_statement_2.py | Python | 0.99994 | @@ -0,0 +1,780 @@
+'''%0AOpen the file sample.txt and read it line by line.%0AWhen you find a line that starts with 'From:' like the following line:%0A%0A From [email protected] Sat Jan 5 09:14:16 2008%0A%0AYou will parse the From line using split() and print out the second word in the line%0A(i.e. the entire address of the person who sent the message).%0AThen print out a count at the end.%0A%0AHint: make sure not to include the lines that start with 'From:'.%0A'''%0AfileName = raw_input(%22Enter file name : %22)%0Aif len(fileName) %3C 1 : fileName = %22sample.txt%22%0A%0AopenFile = open(fileName)%0Acount = 0%0Awords = list()%0A%0Afor line in openFile:%0A%09if not line.startswith(%22From:%22):%0A%09%09continue%0A%09count += 1%0A%09words = line.split()%0A%09print words%5B1%5D%0A%0Aprint %22There were%22, count, %22lines in the file with 'From:' as the first word.%22%0A
|
|
01029805a6fb3484cf803f0c0abd18232b4ad810 | Add database tools | egoio/tools/db.py | egoio/tools/db.py | Python | 0.000001 | @@ -0,0 +1,1679 @@
+def grant_db_access(conn, schema, table, role):%0A r%22%22%22Gives access to database users/ groups%0A%0A Parameters%0A ----------%0A conn : sqlalchemy connection object%0A A valid connection to a database%0A schema : str%0A The database schema%0A table : str%0A The database table%0A role : str%0A database role that access is granted to%0A%0A %22%22%22%0A grant_str = %22%22%22GRANT ALL ON TABLE %7Bschema%7D.%7Btable%7D%0A TO %7Brole%7D WITH GRANT OPTION;%22%22%22.format(schema=schema, table=table,%0A role=role)%0A%0A conn.execute(grant_str)%0A%0A%0Adef add_primary_key(conn, schema, table, pk_col):%0A r%22%22%22Adds primary key to database table%0A%0A Parameters%0A ----------%0A conn : sqlalchemy connection object%0A A valid connection to a database%0A schema : str%0A The database schema%0A table : str%0A The database table%0A pk_col : str%0A Column that primary key is applied to%0A%0A %22%22%22%0A sql_str = %22%22%22alter table %7Bschema%7D.%7Btable%7D add primary key (%7Bcol%7D)%22%22%22.format(%0A schema=schema, table=table, col=pk_col)%0A%0A conn.execute(sql_str)%0A%0A%0Adef change_owner_to(conn, schema, table, role):%0A r%22%22%22Changes table's ownership to role%0A%0A Parameters%0A ----------%0A conn : sqlalchemy connection object%0A A valid connection to a database%0A schema : str%0A The database schema%0A table : str%0A The database table%0A role : str%0A database role that access is granted to%0A%0A %22%22%22%0A sql_str = %22%22%22ALTER TABLE %7Bschema%7D.%7Btable%7D%0A OWNER TO %7Brole%7D;%22%22%22.format(schema=schema,%0A table=table,%0A role=role)%0A%0A conn.execute(sql_str)
|
|
bc9072cee7ce880c30af83ee4c239ae9cf1ddbfe | Create NumberofIslandsII_001.py | lintcode/Number-of-Islands-II/NumberofIslandsII_001.py | lintcode/Number-of-Islands-II/NumberofIslandsII_001.py | Python | 0.000215 | @@ -0,0 +1,1878 @@
+# Definition for a point.%0A# class Point:%0A# def __init__(self, a=0, b=0):%0A# self.x = a%0A# self.y = b%0A%0A%0Aclass UnionFind:%0A def __init__(self, n, m):%0A self.fathers = %7B%7D%0A self.nsets = 0%0A self.grid = %5B%5B0 for _ in range(m)%5D for _ in range(n)%5D%0A self.n = n%0A self.m = m%0A%0A def build_island(self, i, j):%0A self.grid%5Bi%5D%5Bj%5D = 1%0A self.fathers%5Bi * self.m + j%5D = i * self.m + j%0A self.nsets += 1%0A nbrs = %5B%5D%0A nbrs.append(%5Bi, j - 1%5D)%0A nbrs.append(%5Bi, j + 1%5D)%0A nbrs.append(%5Bi - 1, j%5D)%0A nbrs.append(%5Bi + 1, j%5D)%0A for nbr in nbrs:%0A if -1 %3C nbr%5B0%5D %3C self.n and -1 %3C nbr%5B1%5D %3C self.m:%0A if self.grid%5Bnbr%5B0%5D%5D%5Bnbr%5B1%5D%5D == 1:%0A idx1 = i * self.m + j%0A idx2 = nbr%5B0%5D * self.m + nbr%5B1%5D%0A self.union(idx1, idx2)%0A%0A def find(self, idx):%0A return self.compressed_find(idx)%0A%0A def compressed_find(self, idx):%0A fidx = self.fathers%5Bidx%5D%0A if fidx != idx:%0A self.fathers%5Bidx%5D = self.find(fidx)%0A return self.fathers%5Bidx%5D%0A%0A def union(self, i, j):%0A fi = self.find(i)%0A fj = self.find(j)%0A if fi != fj:%0A self.fathers%5Bfi%5D = fj%0A self.nsets -= 1%0A%0A def get_nsets(self):%0A return self.nsets%0A%0A%0Aclass Solution:%0A # @param %7Bint%7D n an integer%0A # @param %7Bint%7D m an integer%0A # @param %7BPint%5B%5D%7D operators an array of point%0A # @return %7Bint%5B%5D%7D an integer array%0A def numIslands2(self, n, m, operators):%0A # Write your code here%0A if n == 0 or m == 0:%0A return 0%0A%0A uf, res = UnionFind(n, m), %5B%5D%0A for oper in operators:%0A i, j = oper.x, oper.y%0A if -1 %3C i %3C n and -1 %3C j %3C m:%0A uf.build_island(i, j)%0A res.append(uf.get_nsets())%0A return res%0A
|
|
1ced3a967742783ef649f7c7defecf333050d547 | Update http_endpoint to use convert_mapping_to_xml() | jenkins_jobs/modules/notifications.py | jenkins_jobs/modules/notifications.py | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Notifications module allows you to configure Jenkins to notify
other applications about various build phases. It requires the
Jenkins notification plugin.
**Component**: notifications
:Macro: notification
:Entry Point: jenkins_jobs.notifications
"""
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import JenkinsJobsException
import jenkins_jobs.modules.base
def http_endpoint(registry, xml_parent, data):
"""yaml: http
Defines an HTTP notification endpoint.
Requires the Jenkins :jenkins-wiki:`Notification Plugin
<Notification+Plugin>`.
:arg str format: notification payload format, JSON (default) or XML
:arg str event: job events that trigger notifications: started,
completed, finalized or all (default)
:arg str url: URL of the endpoint
:arg str timeout: Timeout in milliseconds for sending notification
request (30 seconds by default)
:arg str log: Number lines of log messages to send (0 by default).
Use -1 for all (use with caution).
Example:
.. literalinclude:: \
/../../tests/notifications/fixtures/http-endpoint002.yaml
:language: yaml
"""
endpoint_element = XML.SubElement(xml_parent,
'com.tikal.hudson.plugins.notification.'
'Endpoint')
supported_formats = ['JSON', 'XML']
fmt = data.get('format', 'JSON').upper()
if fmt not in supported_formats:
raise JenkinsJobsException(
"format must be one of %s" %
", ".join(supported_formats))
else:
XML.SubElement(endpoint_element, 'format').text = fmt
XML.SubElement(endpoint_element, 'protocol').text = 'HTTP'
supported_events = ['started', 'completed', 'finalized', 'all']
event = data.get('event', 'all').lower()
if event not in supported_events:
raise JenkinsJobsException(
"event must be one of %s" %
", ".join(supported_events))
else:
XML.SubElement(endpoint_element, 'event').text = event
XML.SubElement(endpoint_element, 'timeout').text = str(data.get('timeout',
30000))
XML.SubElement(endpoint_element, 'url').text = data['url']
XML.SubElement(endpoint_element, 'loglines').text = str(data.get('log', 0))
class Notifications(jenkins_jobs.modules.base.Base):
sequence = 22
component_type = 'notification'
component_list_type = 'notifications'
def gen_xml(self, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
notifications = data.get('notifications', [])
if notifications:
notify_element = XML.SubElement(properties,
'com.tikal.hudson.plugins.'
'notification.'
'HudsonNotificationProperty')
endpoints_element = XML.SubElement(notify_element, 'endpoints')
for endpoint in notifications:
self.registry.dispatch('notification',
endpoints_element, endpoint)
| Python | 0.000001 | @@ -903,12 +903,14 @@
ML%0A%0A
-from
+import
jen
@@ -923,75 +923,84 @@
obs.
-errors import JenkinsJobsException%0Aimport jenkins_jobs.modules.base
+modules.base%0Afrom jenkins_jobs.modules.helpers import convert_mapping_to_xml
%0A%0A%0Ad
@@ -2002,346 +2002,8 @@
L'%5D%0A
- fmt = data.get('format', 'JSON').upper()%0A if fmt not in supported_formats:%0A raise JenkinsJobsException(%0A %22format must be one of %25s%22 %25%0A %22, %22.join(supported_formats))%0A else:%0A XML.SubElement(endpoint_element, 'format').text = fmt%0A%0A XML.SubElement(endpoint_element, 'protocol').text = 'HTTP'%0A%0A
@@ -2070,20 +2070,18 @@
l'%5D%0A
-even
+fm
t = data
@@ -2090,26 +2090,28 @@
et('
-event', 'all').low
+format', 'JSON').upp
er()
@@ -2119,301 +2119,215 @@
-if
event
-not in supported_events:%0A raise JenkinsJobsException(%0A %22event must be one of %25s%22 %25%0A %22, %22.join(supported_events))%0A else:%0A XML.SubElement(endpoint_element, 'event').text = event%0A%0A XML.SubElement(endpoint_element, 'timeout').text = str(data.get(
+= data.get('event', 'all').lower()%0A mapping = %5B%0A ('', 'format', fmt, supported_formats),%0A ('', 'protocol', 'HTTP'),%0A ('', 'event', event, supported_events),%0A ('timeout',
'tim
@@ -2332,16 +2332,24 @@
imeout',
+ 30000),
%0A
@@ -2353,208 +2353,140 @@
- 30000))
+('url', 'url', None),%0A ('log', 'loglines', 0)%5D
%0A
-XML.SubElement(endpoint_element, 'url').text = data%5B'url'%5D%0A XML.SubElement(endpoint_element, 'loglines').text = str(data.get('log', 0)
+convert_mapping_to_xml(endpoint_element, data, mapping, fail_required=True
)%0A%0A%0A
|
4856b426b380d4d46cccc2f5b8ab2212956a96c2 | test of time module. not terribly fancy, but it does touch every function and variable in the module, verifies a few return values and even tests a couple of known error conditions. | Lib/test/test_time.py | Lib/test/test_time.py | Python | 0 | @@ -0,0 +1,800 @@
+import time%0A%0Atime.altzone%0Atime.clock()%0At = time.time()%0Atime.asctime(time.gmtime(t))%0Aif time.ctime(t) %3C%3E time.asctime(time.localtime(t)):%0A print 'time.ctime(t) %3C%3E time.asctime(time.localtime(t))'%0A%0Atime.daylight%0Aif int(time.mktime(time.localtime(t))) %3C%3E int(t):%0A print 'time.mktime(time.localtime(t)) %3C%3E t'%0A%0Atime.sleep(1.2)%0Att = time.gmtime(t)%0Afor directive in ('a', 'A', 'b', 'B', 'c', 'd', 'E', 'H', 'I',%0A%09%09 'j', 'm', 'M', 'n', 'N', 'o', 'p', 'S', 't',%0A%09%09 'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%25'):%0A format = '%25' + directive%0A time.strftime(format, tt)%0A%0Atime.timezone%0Atime.tzname%0A%0A# expected errors%0Atry:%0A time.asctime(0)%0Aexcept TypeError:%0A pass%0A%0Atry:%0A time.mktime((999999, 999999, 999999, 999999,%0A%09%09 999999, 999999, 999999, 999999,%0A%09%09 999999))%0Aexcept OverflowError:%0A pass%0A
|
|
c851501cc8149685a9e9c023aa200b92c17a9078 | Add decoder for IDA field names | pida_fields.py | pida_fields.py | | Python | 0.000001 | @@ -0,0 +1,306 @@
+def decode_name_fields(ida_fields):%0A i = -1%0A stop = len(ida_fields)%0A while True:%0A i += 1%0A if i == stop:%0A break%0A%0A count = ord(ida_fields%5Bi%5D) - 1%0A if count == 0:%0A continue%0A%0A i += 1%0A yield ida_fields%5Bi:i + count%5D%0A i += count - 1%0A
|
|
015c7f7fbab200084cf08bd1f7e35cbcd61b369e | Axonical hello world. | Sketches/PT/helloworld.py | Sketches/PT/helloworld.py | Python | 0.999999 | @@ -0,0 +1,895 @@
+#!/usr/bin/env python%0Aimport time%0Afrom Axon.Component import component%0Afrom Axon.Scheduler import scheduler%0A%0Aclass HelloPusher(component):%0A def main(self):%0A while True:%0A time.sleep(0.5) # normally this would be a bad idea, since the entire scheduler will halt inside this component. %0A self.send(%22%5Cn!ednom ,tulas%22, 'outbox')%0A yield 1%0A%0Aclass Reverser(component):%0A def main(self):%0A while True:%0A if self.dataReady('inbox'):%0A item = self.recv('inbox')%0A self.send(item%5B::-1%5D, 'outbox')%0A else: self.pause()%0A yield 1%0A%0Afrom Kamaelia.Chassis.Pipeline import Pipeline%0Afrom Kamaelia.Util.Console import ConsoleEchoer%0A%0Athepipe = Pipeline(HelloPusher(), Reverser(), ConsoleEchoer()).activate()%0A%0A# thepipe = Pipeline(HelloPusher(), Reverser(), ConsoleEchoer()).run()%0A%0Ascheduler.run.runThreads()
|
|
e869c7ef9e3d19da4c98cda57b5e22fb5a35cba5 | Add first basic unit tests using py.test | tests/test_validators.py | tests/test_validators.py | | Python | 0 | @@ -0,0 +1,2375 @@
+%22%22%22%0A test_validators%0A ~~~~~~~~~~~~~~%0A %0A Unittests for bundled validators.%0A %0A :copyright: 2007-2008 by James Crasta, Thomas Johansson.%0A :license: MIT, see LICENSE.txt for details.%0A%22%22%22%0A%0Afrom py.test import raises%0Afrom wtforms.validators import ValidationError, length, url, not_empty, email, ip_address%0A%0Aclass DummyForm(object):%0A pass%0A%0Aclass DummyField(object):%0A def __init__(self, data):%0A self.data = data%0A%0Aform = DummyForm()%0A%0Adef test_email():%0A assert email(form, DummyField('[email protected]')) == None%0A assert email(form, DummyField('[email protected]')) == None%0A assert email(form, DummyField('[email protected]')) == None%0A assert email(form, DummyField('[email protected]')) == None%0A raises(ValidationError, email, form, DummyField('foo')) == None%0A raises(ValidationError, email, form, DummyField('bar.dk')) == None%0A raises(ValidationError, email, form, DummyField('foo@')) == None%0A raises(ValidationError, email, form, DummyField('@bar.dk')) == None%0A raises(ValidationError, email, form, DummyField('foo@bar')) == None%0A raises(ValidationError, email, form, DummyField('[email protected]')) == None%0A raises(ValidationError, email, form, DummyField('[email protected]')) == None%0A%0Adef test_length():%0A field = DummyField('foobar')%0A assert length(min=2, max=6)(form, field) == None%0A raises(ValidationError, length(min=7), form, field)%0A raises(ValidationError, length(max=5), form, field)%0A %0Adef test_url():%0A assert url()(form, DummyField('http://foobar.dk')) == None%0A assert url()(form, DummyField('http://foobar.dk/')) == None%0A assert url()(form, DummyField('http://foobar.dk/foobar')) == None%0A raises(ValidationError, url(), form, DummyField('http://foobar'))%0A raises(ValidationError, url(), form, DummyField('foobar.dk'))%0A raises(ValidationError, url(), form, DummyField('http://foobar.12'))%0A%0Adef test_not_empty():%0A assert not_empty()(form, DummyField('foobar')) == None%0A raises(ValidationError, not_empty(), form, DummyField(''))%0A raises(ValidationError, not_empty(), form, DummyField(' '))%0A%0Adef test_ip_address():%0A assert ip_address(form, DummyField('127.0.0.1')) == None%0A raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))%0A raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))%0A raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))%0A %0A
|
|
54c358a296733d2a5236a9a776830f1b78682b73 | Add lc040_combination_sum_ii.py | lc040_combination_sum_ii.py | lc040_combination_sum_ii.py | Python | 0.004345 | @@ -0,0 +1,963 @@
+%22%22%22Leetcode 40. Combination Sum II%0AMedium%0A%0AURL: https://leetcode.com/problems/combination-sum-ii/%0A%0AGiven a collection of candidate numbers (candidates) and a target number (target),%0Afind all unique combinations in candidates where the candidate numbers sums to target.%0A%0AEach number in candidates may only be used once in the combination.%0A%0ANote:%0A%0AAll numbers (including target) will be positive integers.%0AThe solution set must not contain duplicate combinations.%0AExample 1:%0A%0AInput: candidates = %5B10,1,2,7,6,1,5%5D, target = 8,%0AA solution set is:%0A%5B%0A %5B1, 7%5D,%0A %5B1, 2, 5%5D,%0A %5B2, 6%5D,%0A %5B1, 1, 6%5D%0A%5D%0A%0AExample 2:%0AInput: candidates = %5B2,5,2,1,2%5D, target = 5,%0AA solution set is:%0A%5B%0A %5B1,2,2%5D,%0A %5B5%5D%0A%5D%0A%22%22%22%0A%0Aclass Solution(object):%0A def combinationSum2(self, candidates, target):%0A %22%22%22%0A :type candidates: List%5Bint%5D%0A :type target: int%0A :rtype: List%5BList%5Bint%5D%5D%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
7fab8c2d014f013131bd4d6301f5f8e5268d6037 | add leetcode Pow(x, n) | leetcode/powx-n/solution.py | leetcode/powx-n/solution.py | Python | 0.000908 | @@ -0,0 +1,483 @@
+# -*- coding:utf-8 -*-%0Aclass Solution:%0A # @param x, a float%0A # @param n, a integer%0A # @return a float%0A def pow(self, x, n):%0A if n == 0:%0A return 1%0A%0A if n %3C 0:%0A neg_flag = True%0A n = -n%0A else:%0A neg_flag = False%0A ret = 1%0A while n %3E 0:%0A if n %25 2 == 1:%0A ret *= x%0A x = x * x%0A n //= 2%0A if neg_flag:%0A return 1 / ret%0A return ret%0A
|
|
191b7d4edbcd01289242b8f82e0288adfdfc7f23 | call is returning 0 | initHeaders.py | initHeaders.py |
from pyepm import api, config
# ### #!/usr/bin/env python
def main():
api_config = config.read_config()
instance = api.Api(api_config)
from_ = "0x9dc2299a76b68b7ffa9e3ba0fd8cd7646d21d409"
to = "0x824c0d8b9b08a769d237a236af27572ff08ba145"
fun_name = "storeBlockHeader"
sig = "s"
data = '\x02\x00\x00\x00~\xf0U\xe1gM.eQ\xdb\xa4\x1c\xd2\x14\xde\xbb\xee4\xae\xb5D\xc7\xecg\x00\x00\x00\x00\x00\x00\x00\x00\xd3\x99\x89c\xf8\x0c[\xabC\xfe\x8c&"\x8e\x98\xd00\xed\xf4\xdc\xbeH\xa6f\xf5\xc3\x9e-z\x88\\\x91\x02\xc8mSl\x89\x00\x19Y:G\r\x02\x00\x00\x00Tr\xac\x8b\x11\x87\xbf\xcf\x91\xd6\xd2\x18\xbb\xda\x1e\xb2@]|U\xf1\xf8\xcc\x82\x00\x00\x00\x00\x00\x00\x00\x00\xab\n\xaa7|\xa3\xf4\x9b\x15E\xe2\xaek\x06g\xa0\x8fB\xe7-\x8c$\xae#q@\xe2\x8f\x14\xf3\xbb|k\xccmSl\x89\x00\x19\xed\xd8<\xcf\x02\x00\x00\x00\xa9\xab\x12\xe3,\xed\xdc+\xa5\xe6\xade\x1f\xacw,\x986\xdf\x83M\x91\xa0I\x00\x00\x00\x00\x00\x00\x00\x00\xdfuu\xc7\x8f\x83\x1f \xaf\x14~\xa7T\xe5\x84\xaa\xd9Yeiic-\xa9x\xd2\xddq\x86#\xfd0\xc5\xccmSl\x89\x00\x19\xe6Q\x07\xe9\x02\x00\x00\x00,P\x1f\xc0\xb0\xfd\xe9\xb3\xc1\x0e#S\xc1TI*5k\x1a\x02)^+\x86\x00\x00\x00\x00\x00\x00\x00\x00\xa7\xaaa\xc8\xd3|\x88v\xba\xa0\x17\x9ej2\x94D4\xbf\xd3\xe1\xccug\x89*1K\x0c{\x9e]\x92\'\xcemSl\x89\x00\x19\xa4\xa0<{\x02\x00\x00\x00\xe7\xfc\x91>+y\n0v\x0c\xaa\xfb\x9b_\xaa\xe1\xb5\x1dlT\xff\xe4\xae\x82\x00\x00\x00\x00\x00\x00\x00\x00P\xad\x11k\xfb\x11c\x03\x03a\xd9}H\xb4\xca\x90\'\xa4\x9b\xca\xf8\xb8\xd4!\x1b\xaa\x92\xccr\xe7\xe1#f\xcfmSl\x89\x00\x19\xe6\x13\x9c\x82'
gas = 3e6
gas_price = 100
res = instance.call(to, from_, fun_name, sig, data, gas, gas_price)
print("res: %s" % res)
if __name__ == '__main__':
main()
| Python | 0.998827 | @@ -143,21 +143,32 @@
g)%0A%0A
-from_
+instance.address
= %220x9d
@@ -207,16 +207,17 @@
21d409%22%0A
+%0A
to =
@@ -321,16 +321,17 @@
data =
+%5B
'%5Cx02%5Cx0
@@ -1528,16 +1528,17 @@
x9c%5Cx82'
+%5D
%0A gas
@@ -1541,18 +1541,22 @@
gas = 3
-e6
+000000
%0A gas
@@ -1602,14 +1602,16 @@
o, f
-rom_,
+un_name=
fun_
@@ -1616,16 +1616,20 @@
n_name,
+sig=
sig, dat
@@ -1633,15 +1633,34 @@
data
-, gas,
+=data, gas=gas, gas_price=
gas_
|
736103ea495c89defcae9bf6ab72aa7b89768026 | add start of advisory module | updatebot/advise.py | updatebot/advise.py | Python | 0 | @@ -0,0 +1,872 @@
+#%0A# Copyright (c) 2008 rPath, Inc.%0A#%0A# This program is distributed under the terms of the Common Public License,%0A# version 1.0. A copy of this license should have been distributed with this%0A# source file in a file called LICENSE. If it is not present, the license%0A# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# without any warranty; without even the implied warranty of merchantability%0A# or fitness for a particular purpose. See the Common Public License for%0A# full details.%0A#%0A%0A%22%22%22%0AModule for managing/manipulating advisories.%0A%22%22%22%0A%0Afrom updatebot.errors import *%0A%0Aclass Advisor(object):%0A %22%22%22%0A Class for managing, manipulating, and distributing advisories.%0A %22%22%22%0A%0A def __init__(self, cfg, rpmSource):%0A self._cfg = cfg%0A self._rpmSource = rpmSource%0A
|
|
a571271c396cd43fb3f6ac8109ec1f699b0da498 | Use voluptuous for Aruba (#3119) | homeassistant/components/device_tracker/aruba.py | homeassistant/components/device_tracker/aruba.py | """
Support for Aruba Access Points.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.aruba/
"""
import logging
import re
import threading
from datetime import timedelta
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQUIREMENTS = ['pexpect==4.0.1']
_LOGGER = logging.getLogger(__name__)
_DEVICES_REGEX = re.compile(
r'(?P<name>([^\s]+))\s+' +
r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s+' +
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s+')
# pylint: disable=unused-argument
def get_scanner(hass, config):
"""Validate the configuration and return a Aruba scanner."""
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = ArubaDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class ArubaDeviceScanner(object):
"""This class queries a Aruba Access Point for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.lock = threading.Lock()
self.last_results = {}
# Test the router is accessible.
data = self.get_aruba_data()
self.success_init = data is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client['mac'] for client in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if not self.last_results:
return None
for client in self.last_results:
if client['mac'] == device:
return client['name']
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""Ensure the information from the Aruba Access Point is up to date.
Return boolean if scanning successful.
"""
if not self.success_init:
return False
with self.lock:
data = self.get_aruba_data()
if not data:
return False
self.last_results = data.values()
return True
def get_aruba_data(self):
"""Retrieve data from Aruba Access Point and return parsed result."""
import pexpect
connect = "ssh {}@{}"
ssh = pexpect.spawn(connect.format(self.username, self.host))
query = ssh.expect(['password:', pexpect.TIMEOUT, pexpect.EOF,
'continue connecting (yes/no)?',
'Host key verification failed.',
'Connection refused',
'Connection timed out'], timeout=120)
if query == 1:
_LOGGER.error("Timeout")
return
elif query == 2:
_LOGGER.error("Unexpected response from router")
return
elif query == 3:
ssh.sendline('yes')
ssh.expect('password:')
elif query == 4:
_LOGGER.error("Host key Changed")
return
elif query == 5:
_LOGGER.error("Connection refused by server")
return
elif query == 6:
_LOGGER.error("Connection timed out")
return
ssh.sendline(self.password)
ssh.expect('#')
ssh.sendline('show clients')
ssh.expect('#')
devices_result = ssh.before.split(b'\r\n')
ssh.sendline('exit')
devices = {}
for device in devices_result:
match = _DEVICES_REGEX.search(device.decode('utf-8'))
if match:
devices[match.group('ip')] = {
'ip': match.group('ip'),
'mac': match.group('mac').upper(),
'name': match.group('name')
}
return devices
| Python | 0 | @@ -243,16 +243,95 @@
edelta%0A%0A
+import voluptuous as vol%0A%0Aimport homeassistant.helpers.config_validation as cv%0A
from hom
@@ -380,16 +380,33 @@
t DOMAIN
+, PLATFORM_SCHEMA
%0Afrom ho
@@ -474,58 +474,8 @@
AME%0A
-from homeassistant.helpers import validate_config%0A
from
@@ -868,311 +868,312 @@
')%0A%0A
-%0A# pylint: disable=unused-argument%0Adef get_scanner(hass, config):%0A %22%22%22Validate the configuration and return a Aruba scanner.%22%22%22%0A if not validate_config(config,%0A %7BDOMAIN: %5BCONF_HOST, CONF_USERNAME, CONF_PASSWORD%5D%7D,%0A _LOGGER):%0A return None%0A
+PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(%7B%0A vol.Required(CONF_HOST): cv.string,%0A vol.Required(CONF_PASSWORD): cv.string,%0A vol.Required(CONF_USERNAME): cv.string%0A%7D)%0A%0A%0A# pylint: disable=unused-argument%0Adef get_scanner(hass, config):%0A %22%22%22Validate the configuration and return a Aruba scanner.%22%22%22
%0A
@@ -2889,17 +2889,17 @@
nnect =
-%22
+'
ssh %7B%7D@%7B
@@ -2899,17 +2899,17 @@
sh %7B%7D@%7B%7D
-%22
+'
%0A
@@ -3333,17 +3333,17 @@
ror(
-%22
+'
Timeout
-%22
+'
)%0A
@@ -3410,17 +3410,17 @@
R.error(
-%22
+'
Unexpect
@@ -3442,17 +3442,17 @@
m router
-%22
+'
)%0A
@@ -3608,17 +3608,17 @@
R.error(
-%22
+'
Host key
@@ -3625,17 +3625,17 @@
Changed
-%22
+'
)%0A
@@ -3690,33 +3690,33 @@
_LOGGER.error(
-%22
+'
Connection refus
@@ -3727,17 +3727,17 @@
y server
-%22
+'
)%0A
@@ -3800,17 +3800,17 @@
R.error(
-%22
+'
Connecti
@@ -3821,17 +3821,17 @@
imed out
-%22
+'
)%0A
|
27b9727926139ae2cfde6d3cdcdf5746ed28e03d | Add new package arbor (#11914) | var/spack/repos/builtin/packages/arbor/package.py | var/spack/repos/builtin/packages/arbor/package.py | Python | 0 | @@ -0,0 +1,2693 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Arbor(CMakePackage):%0A %22%22%22Arbor is a high-performance library for computational neuroscience%0A simulations.%22%22%22%0A%0A homepage = %22https://github.com/arbor-sim/arbor/%22%0A url = %22https://github.com/arbor-sim/arbor/archive/v0.2.tar.gz%22%0A%0A version('0.2', sha256='43c9181c03be5f3c9820b2b50592d7b41344f37e1200980119ad347eb7bcf4eb')%0A%0A variant('vectorize', default=False,%0A description='Enable vectorization of computational kernels')%0A variant('gpu', default=False, description='Enable GPU support')%0A variant('mpi', default=False, description='Enable MPI support')%0A variant('python', default=False,%0A description='Enable Python frontend support')%0A variant('unwind', default=False,%0A description='Enable libunwind for pretty stack traces')%0A%0A depends_on('cuda', when='+gpu')%0A depends_on('mpi', when='+mpi')%0A depends_on('libunwind', when='+unwind')%0A%0A extends('[email protected]:', when='+python')%0A depends_on('py-mpi4py', when='+mpi+python', type=('build', 'run'))%0A%0A depends_on('[email protected]:', type='build')%0A # mentioned in documentation but shouldn't be necessary when%0A # using the archive%0A # depends_on('[email protected]:', type='build')%0A%0A # compiler dependencies%0A # depends_on(C++14)%0A # depends_on('[email protected]:', type='build')%0A # depends_on('llvm@4:', type='build')%0A # depends_on('clang-apple@9:', type='build')%0A%0A # when building documentation, this could be an optional dependency%0A depends_on('py-sphinx', type='build')%0A%0A def patch(self):%0A filter_file(%0A r'find_library%5C(_unwind_library_target unwind-%5C$%7Blibunwind_arch%7D',%0A r'find_library(_unwind_library_target unwind-$%7B_libunwind_arch%7D',%0A 'cmake/FindUnwind.cmake'%0A )%0A filter_file(%0A r'target_compile_definitions%5C(arbor-private-deps ARB_WITH_UNWIND%5C)', # noqa: E501%0A r'target_compile_definitions(arbor-private-deps INTERFACE WITH_UNWIND)', # noqa: E501%0A 'CMakeLists.txt'%0A )%0A%0A def cmake_args(self):%0A args = %5B%0A '-DARB_VECTORIZE=' + ('ON' if '+vectorize' in self.spec else 'OFF'), # noqa: E501%0A '-DARB_WITH_GPU=' + ('ON' if '+gpu' in self.spec else 'OFF'),%0A '-DARB_WITH_PYTHON=' + ('ON' if '+python' in self.spec else 'OFF'),%0A %5D%0A%0A if '+unwind' in self.spec:%0A args.append('-DUnwind_ROOT_DIR=%7B0%7D'.format(self.spec%5B'libunwind'%5D.prefix)) # noqa: E501%0A%0A return args%0A
|
|
ac851c402952cf44b24dfdf5277765ff286dd994 | convert embeddings to js-friendly format | src/convert_embeddings_to_js.py | src/convert_embeddings_to_js.py | | Python | 0.999997 | @@ -0,0 +1,971 @@
+import h5py%0Aimport json%0Aimport numpy as np%0A%0Adef load_embeddings(path):%0A f = h5py.File(path, 'r')%0A nemb = f%5B'nemb'%5D%5B:%5D%0A f.close()%0A return nemb%0A%0Adef load_vocab(path):%0A vocab = %5B%5D%0A with open(path, 'rb') as f:%0A for line in f.readlines():%0A split = line.split(' ')%0A vocab.append((split%5B0%5D, int(split%5B1%5D.rstrip())))%0A # ignore UNK at position 0%0A return vocab%5B1:%5D%0A%0Adef write_to_js(words, embeddings, path):%0A word_vecs = %7B%7D%0A for word, embedding in zip(words, embeddings):%0A word_vecs%5Bword%5D = embedding.tolist()%0A with open(path, 'wb') as f:%0A json.dump(word_vecs, f)%0A f.write(';')%0A%0A%0A%0Adef main():%0A nemb = load_embeddings(path='/tmp/embeddings.h5')%0A vocab = load_vocab('/tmp/vocab.txt')%0A words = %5Btup%5B0%5D for tup in vocab%5D%0A # dont use UNK%0A words = words%5B1:%5D%0A nemb = nemb%5B1:%5D%0A # lower precision, faster%0A nemb = nemb.astype(np.float16)%0A write_to_js(words, nemb%5B1:%5D, path='../../word2vecjson/data/foodVecs.js')%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
11efa5583bbeeee7c7823264f6f73715ea81edc0 | Add trivial test for ECO fetching | luigi/tests/ontologies/eco_test.py | luigi/tests/ontologies/eco_test.py | Python | 0.000002 | @@ -0,0 +1,804 @@
+# -*- coding: utf-8 -*-%0A%0A%22%22%22%0ACopyright %5B2009-2017%5D EMBL-European Bioinformatics Institute%0ALicensed under the Apache License, Version 2.0 (the %22License%22);%0Ayou may not use this file except in compliance with the License.%0AYou may obtain a copy of the License at%0Ahttp://www.apache.org/licenses/LICENSE-2.0%0AUnless required by applicable law or agreed to in writing, software%0Adistributed under the License is distributed on an %22AS IS%22 BASIS,%0AWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0ASee the License for the specific language governing permissions and%0Alimitations under the License.%0A%22%22%22%0A%0Afrom ontologies import eco%0A%0A%0Adef test_can_load_all_eco_terms():%0A source = eco.TermSources(%0A quickgo_file='data/quickgo/rna.gpa'%0A )%0A assert len(list(eco.to_load(source))) == 6%0A
|
|
ec3b080b2f1922f4989b853db45475d185e314de | add all | examples/gcharttestapp/TestGChart00.py | examples/gcharttestapp/TestGChart00.py | Python | 0.000308 | @@ -0,0 +1,400 @@
+%0Aimport GChartTestAppUtil%0Afrom pyjamas.chart.GChart import GChart%0A%0A%0A%22%22%22* Empty chart without anything on it except a title and footnotes %22%22%22%0Aclass TestGChart00 (GChart):%0A def __init__(self):%0A GChart.__init__(self, 150,150)%0A self.setChartTitle(GChartTestAppUtil.getTitle(self))%0A self.setChartFootnotes(%22Check: Consistent with a 'no data' chart (and it doesn't crash).%22)%0A %0A%0A%0A
|
|
4fdba8a1a5a2123843cc9eefd8949fb8996f59b2 | Add a wrapper for ChromeOS to call into telemetry. | telemetry/telemetry/unittest/run_chromeos_tests.py | telemetry/telemetry/unittest/run_chromeos_tests.py | Python | 0.000003 | @@ -0,0 +1,2269 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0Aimport logging%0Aimport os%0Aimport sys%0A%0Afrom telemetry.unittest import gtest_progress_reporter%0Afrom telemetry.unittest import run_tests%0Afrom telemetry.core import util%0A%0A%0Adef RunTestsForChromeOS(browser_type, unit_tests, perf_tests):%0A stream = _LoggingOutputStream()%0A error_string = ''%0A%0A logging.info('Running telemetry unit tests with browser_type %22%25s%22.' %25%0A browser_type)%0A ret = _RunOneSetOfTests(browser_type, 'telemetry',%0A os.path.join('telemetry', 'telemetry'),%0A unit_tests, stream)%0A if ret:%0A error_string += 'The unit tests failed.%5Cn'%0A%0A logging.info('Running telemetry perf tests with browser_type %22%25s%22.' %25%0A browser_type)%0A ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)%0A if ret:%0A error_string = 'The perf tests failed.%5Cn'%0A%0A return error_string%0A%0A%0Adef _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):%0A top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)%0A sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)%0A%0A sys.path.append(top_level_dir)%0A%0A output_formatters = %5Bgtest_progress_reporter.GTestProgressReporter(stream)%5D%0A run_tests.config = run_tests.Config(top_level_dir, %5Bsub_dir%5D,%0A output_formatters)%0A return run_tests.RunTestsCommand.main(%5B'--browser', browser_type%5D + tests)%0A%0A%0Aclass _LoggingOutputStream(object):%0A%0A def __init__(self):%0A self._buffer = %5B%5D%0A%0A def write(self, s):%0A %22%22%22Buffer a string write. Log it when we encounter a newline.%22%22%22%0A if '%5Cn' in s:%0A segments = s.split('%5Cn')%0A segments%5B0%5D = ''.join(self._buffer + %5Bsegments%5B0%5D%5D)%0A log_level = logging.getLogger().getEffectiveLevel()%0A try: # TODO(dtu): We need this because of crbug.com/394571%0A logging.getLogger().setLevel(logging.INFO)%0A for line in segments%5B:-1%5D:%0A logging.info(line)%0A finally:%0A logging.getLogger().setLevel(log_level)%0A self._buffer = %5Bsegments%5B-1%5D%5D%0A else:%0A self._buffer.append(s)%0A%0A def flush(self): # pylint: disable=W0612%0A pass%0A
|
|
fcce65daf40bb1c198be7ddadee8769bf6feea9b | Create k-order-test.py | k-order-test.py | k-order-test.py | Python | 0.000032 | @@ -0,0 +1,2086 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Thu Mar 6 16:41:40 2014%0A%0A@author: xiao%0A%22%22%22%0A%0Afrom k_order import *%0A#number of items to recommand%0Ap=2%0Afadress = %22/home/xiao/ProjetLibre/matrix/matrixInfo%22%0A%0AreadDataFromFile(fadress)%0AgetDu()%0ArecommendationListe = zeros((m,p))%0A%0A############################################%0A#### We need to recommend top items ####%0A############################################%0A%0A%0A#k=1%0A#recommend top p items for user u%0Adef recommendItems_u(u, p):%0A #initialize recommendation items to be -1: null%0A res = zeros(p)-1%0A D_bar_u = Omega_comp%5Bu%5D%0A r = f_bar_d(D_bar_u, u)%0A %0A indexOrder = argsort(r)%0A indexOrder = indexOrder%5B::-1%5D%0A%0A if len(indexOrder) %3E= p:%0A res = indexOrder%5B:p%5D%0A else:%0A res%5B:len(indexOrder)%5D = indexOrder%0A return res%0A%0A#recommend top p items for all m users%0Adef recommendItems(p): %0A for u in range(m):%0A r = recommendItems_u(u, p)%0A recommendationListe%5Bu,:%5D = r%0A %0Adef f_test(x):%0A return x**2 - 3*x%0A %0Adef test():%0A a = arange(5)%0A b = f_test(a)%0A c = argsort(b)%0A c = c%5B::-1%5D%0A return c%0A %0A#show %0Adef showRecomms():%0A for u in range(m):%0A print %22u:%22, u, %22,%22,recommendationListe%5Bu,:%5D%0A %0Ak_os_AUC()%0ArecommendItems(p)%0AshowRecomms()%0A%0A%0A######################################################%0A#### We need to recommend most relavent users ####%0A######################################################%0A%0A%0A######################################################%0A#### test normal AUC ####%0A######################################################%0A%0A######################################################%0A#### test normal WARP ####%0A######################################################%0A%0A######################################################%0A#### test K-os AUC ####%0A######################################################%0A%0A######################################################%0A#### test k-os WARP ####%0A######################################################%0A
|
|
d73235dd994d3705178d0cff142293444977d764 | Remove bad imports | odo/backends/tests/conftest.py | odo/backends/tests/conftest.py | import os
import shutil
import pytest
@pytest.fixture(scope='session')
def sc():
pytest.importorskip('pyspark')
from pyspark import SparkContext
return SparkContext('local[*]', 'odo')
@pytest.yield_fixture(scope='session')
def sqlctx(sc):
pytest.importorskip('pyspark')
from odo.backends.sparksql import HiveContext, SQLContext, SPARK_ONE_TWO
try:
yield HiveContext(sc) if not SPARK_ONE_TWO else SQLContext(sc)
finally:
dbpath = 'metastore_db'
logpath = 'derby.log'
if os.path.exists(dbpath):
assert os.path.isdir(dbpath)
shutil.rmtree(dbpath)
if os.path.exists(logpath):
assert os.path.isfile(logpath)
os.remove(logpath)
| Python | 0.000015 | @@ -75,24 +75,34 @@
ef sc():%0A
+ pyspark =
pytest.impo
@@ -129,20 +129,22 @@
-from
+return
pyspark
imp
@@ -143,40 +143,9 @@
park
- import SparkContext%0A return
+.
Spar
@@ -232,16 +232,26 @@
sc):%0A
+ pyspark =
pytest.
@@ -278,154 +278,44 @@
k')%0A
- from odo.backends.sparksql import HiveContext, SQLContext, SPARK_ONE_TWO%0A%0A try:%0A yield HiveContext(sc) if not SPARK_ONE_TWO else SQL
+%0A try:%0A yield pyspark.Hive
Cont
|
4c5a8f018af4377ce3f9367b0c66a51a6cad671b | add __init__.py | eatable/__init__.py | eatable/__init__.py | Python | 0.00212 | @@ -0,0 +1,47 @@
+%0Afrom .table import Table%0Afrom .row import Row%0A
|
|
2b15d2df8333db5f5cd6fcefaf56f5400baba95e | add test_results_table.py | metaseq/test/test_results_table.py | metaseq/test/test_results_table.py | Python | 0.000104 | @@ -0,0 +1,734 @@
+from metaseq import results_table%0Aimport metaseq%0Aimport numpy as np%0A%0Afn = metaseq.example_filename('ex.deseq')%0Ad = results_table.ResultsTable(fn)%0A%0A%0Adef test_dataframe_access():%0A%0A # different ways of accessing get the same data in memory%0A assert d.id is d.data.id%0A assert d%5B'id'%5D is d.data.id%0A%0Adef test_dataframe_subsetting():%0A assert all(d%5B:10%5D.data == d.data%5B:10%5D)%0A assert all(d.update(d.data%5B:10%5D).data == d.data%5B:10%5D)%0A%0Adef test_copy():%0A e = d.copy()%0A e.id = 'a'%0A assert e.id%5B0%5D == 'a'%0A assert d.id%5B0%5D != 'a'%0A%0Adef smoke_tests():%0A #smoke test for repr%0A print repr(d)%0A%0Adef test_db():%0A%0A # should work%0A d.attach_db(None)%0A%0A d.attach_db(metaseq.example_filename('dmel-all-r5.33-cleaned.gff.db'))%0A
|
|
2382c1c9daf2b17799ceb03f42a6917966b3162c | add kattis/cold | Kattis/cold.py | Kattis/cold.py | Python | 0.999545 | @@ -0,0 +1,224 @@
+%22%22%22%0D%0AProblem: cold%0D%0ALink: https://open.kattis.com/problems/cold %0D%0ASource: Kattis%0D%0A%22%22%22%0D%0AN = int(input())%0D%0AA = list(map(int, input().split()))%0D%0A%0D%0Aanswer = 0%0D%0Afor i in range(len(A)):%0D%0A answer += (A%5Bi%5D %3C 0)%0D%0A%0D%0Aprint(answer)%0D%0A
|
|
652a03d96cbc5c06850fa62fa3507fb74ee3deab | Create python_ciphertext.py | Encryption/python_ciphertext.py | Encryption/python_ciphertext.py | Python | 0.999975 | @@ -0,0 +1,369 @@
+#Simply how to make a ciphertext only with 1 line.%0A%0A%3E%3E%3E #hex_encode = 'summonagus'.encode('hex')%0A%3E%3E%3E hex_encode = '73756d6d6f6e61677573'%0A%3E%3E%3E chip = ''.join(%5B str(int(a)*2) if a.isdigit() and int(a) == 3 else str(int(a)/2) if a.isdigit() and int(a) == 6 else a for a in hex_encode %5D)%0A%3E%3E%3E %0A%3E%3E%3E hex_encode%0A'73756d6d6f6e61677573'%0A%3E%3E%3E chip%0A'76753d3d3f3e31377576'%0A%3E%3E%3E %0A%3E%3E%3E %0A
|
|
8add0d44139b527d40aaa9da43d023ddde52c410 | Add string python solution | HackerRank/PYTHON/Strings/alphabet_rangoli.py | HackerRank/PYTHON/Strings/alphabet_rangoli.py | Python | 0.999999 | @@ -0,0 +1,628 @@
+#!/usr/bin/env python3%0A%0Aimport sys%0Afrom string import ascii_lowercase%0A%0Adef print_rangoli(size):%0A width = size * 4 - 3%0A alphabet = (ascii_lowercase%5B0:size%5D)%5B::-1%5D%0A res = %5B%5D%0A for i in range(size):%0A s = ''%0A for a in alphabet%5B0:i+1%5D:%0A s = '%25s-%25s' %25 (s, a)%0A temp = s + s%5B::-1%5D%5B1:%5D%0A if len(temp) == width + 2:%0A temp = temp%5B1:-1%5D%0A res.append(temp)%0A else:%0A res.append(temp.center(width, '-'))%0A%0A print('%5Cn'.join(res))%0A print('%5Cn'.join(list(reversed(res%5B0:size - 1%5D))))%0A%0A%0Aif __name__ == '__main__':%0A n = int(input())%0A print_rangoli(n)%0A
|
|
eac74d731b01f732d23ce21e8132fa0785aa1ab2 | Create visible_elements.py | visible_elements.py | visible_elements.py | Python | 0.000003 | @@ -0,0 +1,968 @@
+# -*- coding: utf-8 -*-%0Aimport unittest%0Afrom selenium import webdriver%0Afrom selenium.webdriver.support.wait import WebDriverWait%0Afrom selenium.webdriver.common.by import By%0A%0Aclass visible_elements(unittest.TestCase):%0A%0A def setUp(self):%0A self.driver = webdriver.Chrome(%22C://chromedriver/chromedriver.exe%22) %0A self.driver.maximize_window()%0A wait = WebDriverWait(self.driver, 10)%0A%0A%0A def test_clickelements(self):%0A self.driver.get(%22http://localhost/litecart/en/%22)%0A %0A rows = self.driver.find_elements_by_xpath(%22//li%5B@class='product column shadow hover-light'%5D%22)%0A%0A def are_elements_present(self, *args):%0A return len(self.driver.find_elements(*args)) == 1 %0A%0A are_elements_present(self, By.XPATH, %22//div%5B@class='sticker sale'%5D%22 and %22//div%5B@class='sticker new'%5D%22 ) in rows %0A%0A %0A def tearDown(self):%0A self.driver.close()%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A%0A
|
|
eaa45d8a9a8cd26379ea7bd3bcee99cbab08d9e7 | Remove hdf5 ~cxx constraint on netcdf | var/spack/repos/builtin/packages/netcdf/package.py | var/spack/repos/builtin/packages/netcdf/package.py | from spack import *
class Netcdf(Package):
"""NetCDF is a set of software libraries and self-describing, machine-independent
data formats that support the creation, access, and sharing of array-oriented
scientific data."""
homepage = "http://www.unidata.ucar.edu/software/netcdf"
url = "ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4.3.3.tar.gz"
version('4.4.0', 'cffda0cbd97fdb3a06e9274f7aef438e')
version('4.3.3', '5fbd0e108a54bd82cb5702a73f56d2ae')
variant('mpi', default=True, description='Enables MPI parallelism')
variant('hdf4', default=False, description="Enable HDF4 support")
# Dependencies:
depends_on("curl") # required for DAP support
depends_on("hdf", when='+hdf4')
depends_on("hdf5+mpi~cxx", when='+mpi') # required for NetCDF-4 support
depends_on("hdf5~mpi", when='~mpi') # required for NetCDF-4 support
depends_on("zlib") # required for NetCDF-4 support
depends_on("m4")
def install(self, spec, prefix):
# Environment variables
CPPFLAGS = []
LDFLAGS = []
LIBS = []
config_args = [
"--prefix=%s" % prefix,
"--enable-fsync",
"--enable-v2",
"--enable-utilities",
"--enable-shared",
"--enable-static",
"--enable-largefile",
# necessary for HDF5 support
"--enable-netcdf-4",
"--enable-dynamic-loading",
# necessary for DAP support
"--enable-dap"
]
# Make sure Netcdf links against Spack's curl
# Otherwise it may pick up system's curl, which could lead to link errors:
# /usr/lib/x86_64-linux-gnu/libcurl.so: undefined reference to `SSL_CTX_use_certificate_chain_file@OPENSSL_1.0.0'
LIBS.append("-lcurl")
CPPFLAGS.append("-I%s" % spec['curl'].prefix.include)
LDFLAGS.append ("-L%s" % spec['curl'].prefix.lib)
if '+mpi' in spec:
config_args.append('--enable-parallel4')
CPPFLAGS.append("-I%s/include" % spec['hdf5'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['hdf5'].prefix)
# HDF4 support
# As of NetCDF 4.1.3, "--with-hdf4=..." is no longer a valid option
# You must use the environment variables CPPFLAGS and LDFLAGS
if '+hdf4' in spec:
config_args.append("--enable-hdf4")
CPPFLAGS.append("-I%s/include" % spec['hdf'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['hdf'].prefix)
LIBS.append( "-l%s" % "jpeg")
if 'szip' in spec:
CPPFLAGS.append("-I%s/include" % spec['szip'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['szip'].prefix)
LIBS.append( "-l%s" % "sz")
# Fortran support
# In version 4.2+, NetCDF-C and NetCDF-Fortran have split.
# Use the netcdf-fortran package to install Fortran support.
config_args.append('CPPFLAGS=%s' % ' '.join(CPPFLAGS))
config_args.append('LDFLAGS=%s' % ' '.join(LDFLAGS))
config_args.append('LIBS=%s' % ' '.join(LIBS))
configure(*config_args)
make()
make("install")
| Python | 0.000001 | @@ -501,16 +501,17 @@
t('mpi',
+
default
@@ -516,16 +516,17 @@
lt=True,
+
descrip
@@ -576,19 +576,16 @@
('hdf4',
-
default
@@ -604,17 +604,17 @@
ription=
-%22
+'
Enable H
@@ -628,82 +628,32 @@
port
-%22
+'
)%0A%0A
-# Dependencies:%0A depends_on(%22curl%22) # required for DAP support
+depends_on(%22m4%22)
%0A
@@ -689,56 +689,16 @@
4')%0A
+%0A
- depends_on(%22hdf5+mpi~cxx%22, when='+mpi')
#
-r
+R
equi
@@ -701,32 +701,27 @@
equired for
-NetCDF-4
+DAP
support%0A
@@ -737,36 +737,23 @@
on(%22
-hdf5~mpi%22, when='~mpi')
+curl%22)%0A%0A
#
-r
+R
equi
@@ -803,61 +803,87 @@
ib%22)
- # required for NetCDF-4 support%0A depends_on(%22m4%22
+%0A depends_on(%22hdf5+mpi%22, when='+mpi')%0A depends_on(%22hdf5~mpi%22, when='~mpi'
)%0A%0A
@@ -1833,10 +1833,10 @@
pend
-
(
+
%22-L%25
|
5343c89686fd05cf251388e1f28bfd4343d4c277 | Add python-based CPU implementation | src/CPU/color_histogram.py | src/CPU/color_histogram.py | Python | 0.000074 | @@ -0,0 +1,188 @@
+from PIL import Image%0Afrom collections import defaultdict%0Aimport sys%0A%0Aim = Image.open(sys.argv%5B1%5D)%0Acolors = defaultdict(int)%0Afor pixel in im.getdata():%0A colors%5Bpixel%5D += 1%0Aprint colors%0A
|
|
f408465521484032631adfe9dced21119ad2bf82 | Revert "Delete old MultiServer implementation" | MultiServer.py | MultiServer.py | Python | 0 | @@ -0,0 +1,421 @@
+from multiprocessing import Process%0Aimport subprocess%0Aimport GlobalVars%0A%0A%0Adef botInstance(server, channels):%0A args = %5B%22python%22, %22hubbebot.py%22%5D%0A args.append(server)%0A for chan in channels:%0A args.append(chan)%0A subprocess.call(args)%0A%0A%0Aif __name__ == %22__main__%22:%0A for (server,channels) in GlobalVars.connections.items():%0A p = Process(target=botInstance, args=(server, channels))%0A p.start()%0A
|
|
2ef9618e705bb293641674ca5e7cc1f14daf3483 | Set default branding for all organisations | migrations/versions/0285_default_org_branding.py | migrations/versions/0285_default_org_branding.py | Python | 0 | @@ -0,0 +1,1116 @@
+%22%22%22empty message%0A%0ARevision ID: 0285_default_org_branding%0ARevises: 0284_0283_retry%0ACreate Date: 2016-10-25 17:37:27.660723%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '0285_default_org_branding'%0Adown_revision = '0284_0283_retry'%0A%0Afrom alembic import op%0Aimport sqlalchemy as sa%0A%0A%0ABRANDING_TABLES = ('email_branding', 'letter_branding')%0A%0A%0Adef upgrade():%0A for branding in BRANDING_TABLES:%0A op.execute(%22%22%22%0A UPDATE%0A organisation%0A SET%0A %7Bbranding%7D_id = %7Bbranding%7D.id%0A FROM%0A %7Bbranding%7D%0A WHERE%0A %7Bbranding%7D.domain in (%0A SELECT%0A domain%0A FROM%0A domain%0A WHERE%0A domain.organisation_id = organisation.id%0A )%0A %22%22%22.format(branding=branding))%0A%0Adef downgrade():%0A for branding in BRANDING_TABLES:%0A op.execute(%22%22%22%0A UPDATE%0A organisation%0A SET%0A %7Bbranding%7D_id = null%0A %22%22%22.format(branding=branding))%0A
|
|
52b870d36370f46fdc33de2948504c2aec8db1a1 | fix field names in network object | planetstack/core/migrations/0002_network_field_case.py | planetstack/core/migrations/0002_network_field_case.py | Python | 0.000003 | @@ -0,0 +1,1095 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Aimport timezones.fields%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('core', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.RenameField(%0A model_name='networktemplate',%0A old_name='controllerKind',%0A new_name='controller_kind',%0A ),%0A migrations.RenameField(%0A model_name='networktemplate',%0A old_name='guaranteedBandwidth',%0A new_name='guaranteed_bandwidth',%0A ),%0A migrations.RenameField(%0A model_name='networktemplate',%0A old_name='sharedNetworkId',%0A new_name='shared_network_id',%0A ),%0A migrations.RenameField(%0A model_name='networktemplate',%0A old_name='sharedNetworkName',%0A new_name='shared_network_name',%0A ),%0A migrations.RenameField(%0A model_name='networktemplate',%0A old_name='topologyKind',%0A new_name='topology_kind',%0A ),%0A %5D%0A
|
|
9b584c6d23ad93fd497fb2e71d2343a954cea4e5 | Create PaulFinalproject.py | PaulFinalproject.py | PaulFinalproject.py | Python | 0 | @@ -0,0 +1 @@
+%0A
|
|
8462466f8a21f25f85b8a06076877361b2545a12 | Add initialize script | PyResis/__init__.py | PyResis/__init__.py | Python | 0.000002 | @@ -0,0 +1,22 @@
+__author__ = 'Yu Cao'%0A
|
|
8fcc727f9a7fbd886bc900f9c24cf2711a0c5b99 | Create Record.py | Record.py | Record.py | Python | 0.000001 | @@ -0,0 +1,1730 @@
+%22%22%22%0AThe MIT License (MIT)%0A%0ACopyright (c) %3C2016%3E %3CLarry McCaig (aka: Larz60+ aka: Larz60p)%3E%0A%0APermission is hereby granted, free of charge, to any person obtaining a%0Acopy of this software and associated documentation files (the %22Software%22),%0Ato deal in the Software without restriction, including without limitation%0Athe rights to use, copy, modify, merge, publish, distribute, sublicense,%0Aand/or sell copies of the Software, and to permit persons to whom the%0ASoftware is furnished to do so, subject to the following conditions:%0A%0AThe above copyright notice and this permission notice shall be included in%0Aall copies or substantial portions of the Software.%0A%0ATHE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0AIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0AFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0AAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0ALIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0AOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0ATHE SOFTWARE.%0A%0A%22%22%22%0Afrom collections import namedtuple%0Aimport json%0A%0A%0Aclass Record(object):%0A def __init__(self, filename=None):%0A with open(filename, 'r') as f:%0A self.j = f.read()%0A self.record = json.loads(self.j, object_hook=lambda j:%0A namedtuple('data', j.keys())(*j.values()))%0A self.recindex = len(self.record)%0A self.index = 0%0A%0A def __iter__(self):%0A self.index = self.recindex%0A return self%0A%0A def __next__(self):%0A if self.index == 0:%0A raise StopIteration%0A self.index -= 1%0A return self.record%5Bself.index%5D%0A
|
|
0bbddd9b3708c167a4ce75c3cccdc5c1804714ed | fix for issue #8 | aclgen.py | aclgen.py | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is an sample tool which will render policy
# files into usable iptables tables, cisco access lists or
# juniper firewall filters.
# system imports
import dircache
from optparse import OptionParser
import os
import stat
# compiler imports
from lib import naming
from lib import policy
# renderer imports
from lib import cisco
from lib import iptables
from lib import juniper
from lib import silverpeak
# TODO(pmoody): get rid of this global variable.
output_policy_dict = {}
parser = OptionParser()
parser.add_option('-d', '--def',
dest='definitions',
help='defintions directory',
default='./def')
parser.add_option('-o', '--output_directory',
dest='output_directory',
help='output directory',
default='./filters')
parser.add_option('-p', '--pol',
dest='policy',
help='policy file')
parser.add_option('', '--poldir',
dest='policy_directory',
help='policy directory',
default='./policies')
(FLAGS, args) = parser.parse_args()
def render_policy(pol_txt, input_file, output_directory, pol_suffix):
"""Store the string representation of the rendered policy."""
input_file = input_file.lstrip('./')
output_dir = '/'.join([output_directory] + input_file.split('/')[1:-1])
fname = '%s%s' % (os.path.basename(input_file).split('.')[0], pol_suffix)
output_file = os.path.join(output_dir, fname)
if output_file in output_policy_dict:
output_policy_dict[output_file] += pol_txt
else:
output_policy_dict[output_file] = pol_txt
def output_policies():
"""Actually write the policies to disk overwriting existing files..
If the output directory doesn't exist, create it.
"""
for output_file in output_policy_dict:
if not os.path.isdir(os.path.dirname(output_file)):
os.mkdir(os.path.dirname(output_file))
output = open(output_file, 'w')
if output:
print 'writing %s' % output_file
output.write(output_policy_dict[output_file])
def load_policies(base_dir):
"""Recurssively load the polices in a given directory."""
policies = []
for dirfile in dircache.listdir(base_dir):
fname = os.path.join(base_dir, dirfile)
if os.path.isdir(fname):
policies.extend(load_policies(fname))
elif fname.endswith('.pol'):
policies.append(fname)
return policies
def parse_policies(policies, defs):
"""Parse and store the rendered policies."""
for pol in policies:
jcl = False
acl = False
ipt = False
spk = False
p = policy.ParsePolicy(open(pol).read(), defs)
for header in p.headers:
if 'juniper' in header.platforms:
jcl = True
if 'cisco' in header.platforms:
acl = True
if 'iptables' in header.platforms:
ipt = True
if 'silverpeak' in header.platforms:
spk = True
if jcl:
j_obj = juniper.Juniper(p)
render_policy(str(j_obj), pol, FLAGS.output_directory, j_obj._SUFFIX)
if acl:
c_obj = cisco.Cisco(p)
render_policy(str(c_obj), pol, FLAGS.output_directory, c_obj._SUFFIX)
if ipt:
i_obj = iptables.Iptables(p)
render_policy(str(i_obj), pol, FLAGS.output_directory, i_obj._SUFFIX)
if spk:
# Silverpeak module has two output files, .spk and .conf
# create output for both, then render both output files
silverpeak_obj = silverpeak.Silverpeak(p)
silverpeak_acl_text = silverpeak_obj.GenerateACLString()
silverpeak_conf_text = silverpeak_obj.GenerateConfString()
# acl output (.spk)
render_policy(silverpeak_acl_text, pol, FLAGS.output_directory,
silverpeak_obj._SUFFIX)
# conf output (.conf)
render_policy(silverpeak_conf_text, pol, FLAGS.output_directory,
silverpeak_obj._CONF_SUFFIX)
def main():
"""the main entry point."""
# first, load our naming
if not FLAGS.definitions:
parser.error('no definitions supplied')
defs = naming.Naming(FLAGS.definitions)
if not defs:
print 'problem loading definitions'
return
policies_to_render = []
if FLAGS.policy_directory:
if FLAGS.policy and FLAGS.policy_directory != './policies':
raise ValueError('policy and policy_directory are mutually exclusive')
policies_to_render = load_policies(FLAGS.policy_directory)
elif FLAGS.policy:
policies_to_render.append(FLAGS.policy)
parse_policies(policies_to_render, defs)
output_policies()
if __name__ == '__main__':
main()
| Python | 0 | @@ -1997,16 +1997,25 @@
%25s%25s' %25
+(%22.%22.join
(os.path
@@ -2048,17 +2048,21 @@
t('.')%5B0
-%5D
+:-1%5D)
, pol_su
|
1708eb17fb9c232414b0e162754ca31b6fd9366c | Add tests for plagiarism filter command | services/comprehension/main-api/comprehension/tests/management/commands/test_pre_filter_responses.py | services/comprehension/main-api/comprehension/tests/management/commands/test_pre_filter_responses.py | Python | 0 | @@ -0,0 +1,2336 @@
+import csv%0A%0Afrom io import StringIO%0Afrom unittest.mock import call, MagicMock, patch%0A%0Afrom django.test import TestCase%0A%0Afrom ....views.plagiarism import PlagiarismFeedbackView%0Afrom ....management.commands import pre_filter_responses%0A%0ACommand = pre_filter_responses.Command%0A%0A%0Aclass TestCommandBase(TestCase):%0A def setUp(self):%0A self.command = Command()%0A%0A%0Aclass TestPreFilterResponsesCommand(TestCommandBase):%0A def test_add_arguments(self):%0A mock_parser = MagicMock()%0A self.command.add_arguments(mock_parser)%0A%0A self.assertEqual(mock_parser.add_argument.call_count, 2)%0A mock_parser.assert_has_calls(%5B%0A call.add_argument('passage_source', metavar='PASSAGE_SOURCE',%0A help='The path to the file with the passage'),%0A call.add_argument('csv_input', metavar='CSV_PATH',%0A help='The path to the input CSV file'),%0A %5D)%0A%0A @patch.object(PlagiarismFeedbackView, '_check_is_plagiarism')%0A @patch.object(Command, '_retrieve_passage')%0A @patch.object(csv, 'reader')%0A @patch.object(csv, 'writer')%0A @patch(f'%7Bpre_filter_responses.__name__%7D.open')%0A def test_extract_create_feedback_kwargs(self, mock_open, mock_writer,%0A mock_reader, mock_retrieve,%0A mock_check_plagiarism):%0A mock_csv_input = 'MOCK_CSV_INPUT'%0A kwargs = %7B%0A 'passage_source': 'MOCK_PASSAGE_SOURCE',%0A 'csv_input': mock_csv_input,%0A %7D%0A file_name = 'FAKE FILE NAME'%0A mock_handler = mock_open.return_value%0A mock_file_content = StringIO('HEADER%5CnVALUE')%0A mock_handler.__enter__.return_value = mock_file_content%0A%0A mock_reader_row = 'MOCK_ROW'%0A mock_reader.next.return_value = mock_reader_row%0A%0A mock_check_plagiarism.return_value = False%0A%0A self.command.handle(**kwargs)%0A%0A mock_open.assert_has_calls(%5B%0A call(mock_csv_input, 'r'),%0A call().__enter__(),%0A call(f'filtered_%7Bmock_csv_input%7D', 'w'),%0A call().__enter__(),%0A call().__exit__(None, None, None),%0A call().__exit__(None, None, None),%0A %5D)%0A%0A mock_retrieve.assert_called_with(kwargs%5B'passage_source'%5D)%0A%0A mock_writer.assert_called()%0A
|
|
06ced5abe2226a234c2e2887fbf84f18dfa7ddc4 | Update timer for new label. Clean up a bit and use more pyglet 1.1 features. | examples/timer.py | examples/timer.py | from pyglet import window
from pyglet import text
from pyglet import clock
from pyglet import font
w = window.Window(fullscreen=True)
class Timer(text.Label):
def stop(self):
self.__time = 0
def reset(self):
self.__time = 0
self.__running = False
self.text = '00:00'
def animate(self, dt):
if self.__running:
self.__time += dt
m, s = divmod(self.__time, 60)
self.text = '%02d:%02d'%(m, s)
def on_text(self, text):
if text == ' ':
self.__running = not self.__running
return True
return False
ft = font.load('', 360)
timer = Timer('00:00', ft, x=w.width//2, y=w.height//2,
valign='center', halign='center')
timer.reset()
clock.schedule(timer.animate)
w.push_handlers(timer)
while not w.has_exit:
w.dispatch_events()
clock.tick()
w.clear()
timer.draw()
w.flip()
| Python | 0 | @@ -1,108 +1,284 @@
-from pyglet impor
+'''A full-screen minute:second timer. Leave i
t
-w
in
-dow%0Afrom pyglet import text%0Afrom pyglet import clock%0Afrom pyglet import font%0A%0Aw =
+ charge of your conference%0Alighting talks.%0A%0AAfter 5 minutes, the timer goes red. This limit is easily adjustable by%0Ahacking the source code.%0A%0APress spacebar to start, stop and reset the timer.%0A'''%0A%0Aimport pyglet%0A%0Awindow = pyglet.
wind
@@ -321,18 +321,14 @@
mer(
-text.Label
+object
):%0A
@@ -334,20 +334,24 @@
def
-stop
+__init__
(self):%0A
@@ -359,34 +359,247 @@
self.
-__time = 0
+label = pyglet.text.Label('00:00', font_size=360, %0A x=window.width//2, y=window.height//2,%0A valign='center', halign='center')%0A self.reset()%0A
%0A def res
@@ -617,26 +617,24 @@
self.
-__
time = 0%0A
@@ -635,34 +635,32 @@
0%0A self.
-__
running = False%0A
@@ -664,32 +664,38 @@
se%0A self.
+label.
text = '00:00'%0A
@@ -701,16 +701,64 @@
-def anim
+ self.label.color = (255, 255, 255, 255)%0A%0A def upd
ate(
@@ -780,26 +780,24 @@
if self.
-__
running:%0A
@@ -806,26 +806,24 @@
self.
-__
time += dt%0A
@@ -852,18 +852,16 @@
od(self.
-__
time, 60
@@ -879,16 +879,22 @@
self.
+label.
text = '
@@ -907,117 +907,211 @@
02d'
-%25
+ %25
(m, s)%0A
-%0A
-def on_text(self, text):%0A if text == ' ':%0A self.__running = not self.__
+ if m %3E= 5:%0A self.label.color = (180, 0, 0, 255)%[email protected]%0Adef on_key_press(symbol, modifiers):%0A if symbol == pyglet.window.key.SPACE:%0A if timer.
running
+:
%0A
@@ -1123,295 +1123,267 @@
-return True%0A return False%0A%0Aft = font.load('', 360)%0Atimer = Timer('00:00', ft, x=w.width//2, y=w.height//2,%0A valign='center', halign='center')%0Atimer.reset()%0Aclock.schedule(timer.animate)%0Aw.push_handlers(timer)%0A%0Awhile not w.has_exit:%0A w.dispatch_events()%0A clock.tick
+timer.running = False%0A else:%0A if timer.time %3E 0:%0A timer.reset()%0A else:%0A timer.running = True%0A elif symbol == pyglet.window.key.ESCAPE:%0A window.close()%0A%[email protected]%0Adef on_draw
()
+:
%0A
+windo
w.cl
@@ -1402,25 +1402,100 @@
mer.
+label.
draw()%0A
- w.flip
+%0Atimer = Timer()%0Apyglet.clock.schedule_interval(timer.update, 1)%0Apyglet.app.run
()%0A%0A
|
064c1a5bd8790c9ea407f62de0428657354e979f | Create jcolor.py | jcolor.py | jcolor.py | Python | 0.000001 | @@ -0,0 +1,729 @@
+# colors%0AHEADER = '%5C033%5B95m'%0AFAIL = '%5C033%5B91m'%0AFGBLUE2 = '%5C033%5B94m'%0AFGGREEN2 = '%5C033%5B92m'%0AFGORANGE = '%5C033%5B93m'%0AFGGRAY = '%5C033%5B30m'%0AFGRED = '%5C033%5B31m'%0AFGGREEN = '%5C033%5B32m'%0AFGYELLOW = '%5C033%5B33m'%0AFGBLUE = '%5C033%5B34m'%0AFGMAG = '%5C033%5B35m'%0AFGCYAN = '%5C033%5B36m'%0AFGWHITE = '%5C033%5B37m'%0A# FGGRAY = '%5C033%5B61m'%0ABGBLACK = '%5C033%5B40m'%0ABGRED = '%5C033%5B41m'%0ABGGREEN = '%5C033%5B42m'%0ABGYELLOW = '%5C033%5B43m'%0ABGBLUE = '%5C033%5B44m'%0ABGMAG = '%5C033%5B45m'%0ABGCYAN = '%5C033%5B46m'%0ABGWHITE = '%5C033%5B47m'%0A %0A# end color(s)%0AENDC = '%5C033%5B0m'%0A %0A# format settings%0ABOLDON = '%5C033%5B1m'%0ABOLDOFF = '%5C033%5B22m'%0AITALON = '%5C033%5B3m'%0AITALOFF = '%5C033%5B23m'%0AUNDLNON = '%5C033%5B4m'%0AUNDLNOFF = '%5C033%5B24m'%0AINVON = '%5C033%5B7m'%0AINVOFF = '%5C033%5B27m'%0ASTRKTHRUON = '%5C033%5B9m'%0ASTRKTHRUOFF = '%5C033%5B29m'%0A
|
|
040911e2343ec6753c767eff44be2cf54eb33ff8 | add file name to fasta sequence headers | add_file_name_to_reads.py | add_file_name_to_reads.py | Python | 0 | @@ -0,0 +1,300 @@
+import os%0Aimport sys%0Afrom Bio import SeqIO%0A%0Aout = open(sys.argv%5B2%5D, 'w')%0Afor records in SeqIO.parse(open(sys.argv%5B1%5D, 'rU'), %22fasta%22):%0A%09records.id = records.id.strip() + '%25s' %25 sys.argv%5B1%5D.split('.')%5B0%5D%0A%09records.name = records.id%0A%09records.description = records.id%0A%09SeqIO.write(records, out, 'fasta')%0A
|
|
c420f6bf996c53fa8958956626c136ac0e9e55f6 | Add sonos updater plugin. | beetsplug/sonosupdate.py | beetsplug/sonosupdate.py | Python | 0 | @@ -0,0 +1,1938 @@
+# -*- coding: utf-8 -*-%0A# This file is part of beets.%0A# Copyright 2018, Tobias Sauerwein.%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining%0A# a copy of this software and associated documentation files (the%0A# %22Software%22), to deal in the Software without restriction, including%0A# without limitation the rights to use, copy, modify, merge, publish,%0A# distribute, sublicense, and/or sell copies of the Software, and to%0A# permit persons to whom the Software is furnished to do so, subject to%0A# the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be%0A# included in all copies or substantial portions of the Software.%0A%0A%22%22%22Updates a Sonos library whenever the beets library is changed.%0AThis is based on the Kodi Update plugin.%0A%0APut something like the following in your config.yaml to configure:%0A kodi:%0A host: localhost%0A port: 8080%0A user: user%0A pwd: secret%0A%22%22%22%0Afrom __future__ import division, absolute_import, print_function%0A%0Afrom beets import config%0Afrom beets.plugins import BeetsPlugin%0Aimport six%0Aimport soco%0A%0A%0Aclass SonosUpdate(BeetsPlugin):%0A def __init__(self):%0A super(SonosUpdate, self).__init__()%0A self.register_listener('database_change', self.listen_for_db_change)%0A%0A def listen_for_db_change(self, lib, model):%0A %22%22%22Listens for beets db change and register the update%22%22%22%0A self.register_listener('cli_exit', self.update)%0A%0A def update(self, lib):%0A %22%22%22When the client exists try to send refresh request to a Sonos%0A controler.%0A %22%22%22%0A self._log.info(u'Requesting a Sonos library update...')%0A%0A # Try to send update request.%0A try:%0A device = soco.discovery.any_soco()%0A device.music_library.start_library_update()%0A%0A except:%0A self._log.warning(u'Sonos update failed')%0A return%0A%0A self._log.info(u'Sonos update triggered')%0A
|
|
797114781ed4f31c265c58a76e39aa8ff6a16443 | Add missing file from last commit | tensorpack/utils/compatible_serialize.py | tensorpack/utils/compatible_serialize.py | Python | 0.000001 | @@ -0,0 +1,548 @@
+#!/usr/bin/env python%0A%0Aimport os%0Afrom .serialize import loads_msgpack, loads_pyarrow, dumps_msgpack, dumps_pyarrow%0A%0A%22%22%22%0ASerialization that has compatibility guarantee (therefore is safe to store to disk).%0A%22%22%22%0A%0A__all__ = %5B'loads', 'dumps'%5D%0A%0A%0A# pyarrow has no compatibility guarantee%0A# use msgpack for persistent serialization, unless explicitly set from envvar%0Aif os.environ.get('TENSORPACK_COMPATIBLE_SERIALIZE', 'msgpack') == 'msgpack':%0A loads = loads_msgpack%0A dumps = dumps_msgpack%0Aelse:%0A loads = loads_pyarrow%0A dumps = dumps_pyarrow%0A
|
|
2f155e1dafd5302dfbf4607af81bfa979046be8e | add test file | junk/t.py | junk/t.py | Python | 0.000001 | @@ -0,0 +1,28 @@
+def f():%0A print %22hi%22%0A%0Af()
|
|
cfb39d7389d63a293dc075d420f80276a34df193 | Add minimal pygstc example to play a video | examples/pygstc/simple_pipeline.py | examples/pygstc/simple_pipeline.py | Python | 0 | @@ -0,0 +1,1557 @@
+import time%0Aimport sys%0Afrom pygstc.gstc import *%0Afrom pygstc.logger import *%0A%0A#Create a custom logger with loglevel=DEBUG%0Agstd_logger = CustomLogger('simple_pipeline', loglevel='DEBUG')%0A%0A#Create the client with the logger%0Agstd_client = GstdClient(logger=gstd_logger)%0A%0Adef printError():%0A print(%22To play run: python3 simple_pipeline.py play VIDEO_PATH%22)%0A print(%22To stop run: python3 simple_pipeline.py stop%22)%0A print(%22To stop run: python3 simple_pipeline.py reverse%22)%0A print(%22To stop run: python3 simple_pipeline.py slow_motion%22)%0A%0Aif(len(sys.argv) %3E 1):%0A if(sys.argv%5B1%5D==%22play%22):%0A%0A FILE_SOURCE = sys.argv%5B2%5D%0A #pipeline is the string with the pipeline description%0A pipeline = %22playbin uri=file:%22+FILE_SOURCE%0A%0A #Following instructions create and play the pipeline%0A gstd_client.pipeline_create (%22p0%22, pipeline)%0A gstd_client.pipeline_play (%22p0%22)%0A %0A print(%22Playing%22)%0A %0A # Check this %0A # reverse and slow motion restart the pipeline%0A elif(sys.argv%5B1%5D== %22reverse%22):%0A gstd_client.event_seek(%22p0%22, rate=-1.0, format=3, flags=1, start_type=1, start=0, end_type=1, end=-1)%0A print(%22Playing in reverse%22)%0A%0A elif(sys.argv%5B1%5D== %22slow_motion%22):%0A gstd_client.event_seek(%22p0%22, rate=0.5, format=3, flags=1, start_type=1, start=0, end_type=1, end=-1)%0A print(%22Playing in slow motion%22)%0A%0A elif(sys.argv%5B1%5D== %22stop%22):%0A #Following instructions stop and delete the pipeline%0A gstd_client.pipeline_stop (%22p0%22)%0A gstd_client.pipeline_delete (%22p0%22)%0A print(%22Pipeline deleted%22)%0A%0A else:%0A printError()%0Aelse:%0A printError()%0A
|
|
f8d06f85e896c1098f58667c161d920f6d255d7b | Add utility for sent mail | sendmail/log_mail.py | sendmail/log_mail.py | Python | 0 | @@ -0,0 +1,1703 @@
+# -*- coding: utf-8 -*-%0A###############################################################################%0A#%0A# Copyright (C) 2001-2014 Micronaet SRL (%3Chttp://www.micronaet.it%3E).%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as published%0A# by the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A###############################################################################%0Aimport os%0Aimport sys%0Afrom smtplib import SMTP%0Afrom datetime import datetime%0A%0A# Parameter:%0Asmtp_host = 'smtp.qboxmail.com'%0Asmtp_port = 465%0Asmtp_user = '[email protected]''%0Asmtp_password = 'password'%0Afrom_address = '[email protected]' %0Ato_address = '[email protected]'%0Asubject = 'Subject'%0Abody = 'body'%0A%0A# Send mail:%0Asmtp = SMTP()%0Asmtp.set_debuglevel(0)%0Asmtp.connect(smtp_host, smtp_port)%0Asmtp.login(smtp_user, smtp_password)%0A%0Adate = datetime.now().strftime('%25Y-%25m-%25s %25H:%25M')%0Asmtp.sendmail(%0A from_addr, to_addr,%0A 'From: %25s%5CnTo: %25s%5CnSubject: %25s%5CnDate: %25s%5Cn%5Cn%25s' %25 (%0A from_addr,%0A to_addr,%0A subject,%0A date,%0A body,%0A ),%0A )%0Asmtp.quit()%0A %0A%0A# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:%0A
|
|
ddac657da2743c7435e8408677406d37eaea5836 | Add migration. | instance/migrations/0041_auto_20160420_1409.py | instance/migrations/0041_auto_20160420_1409.py | Python | 0 | @@ -0,0 +1,1137 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('instance', '0040_auto_20160420_0754'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='openstackserver',%0A name='status',%0A field=models.CharField(choices=%5B('pending', 'Pending'), ('building', 'Building'), ('booting', 'Booting'), ('ready', 'Ready'), ('terminated', 'Terminated'), ('unknown', 'Unknown'), ('failed', 'BuildFailed')%5D, max_length=20, db_index=True, default='pending'),%0A ),%0A migrations.RunSQL(%0A %5B%0A %22UPDATE instance_openstackserver SET status = 'pending' WHERE status = 'new'%22,%0A %22UPDATE instance_openstackserver SET status = 'building' WHERE status = 'started'%22,%0A %22UPDATE instance_openstackserver SET status = 'booting' WHERE status = 'active' OR status = 'rebooting'%22,%0A %22UPDATE instance_openstackserver SET status = 'ready' WHERE status = 'booted' OR status = 'provisioning'%22,%0A %5D,%0A )%0A %5D%0A
|
|
4c6964a6043c6c5bb3df7ad184e2c6a5537ca6da | Create __init__.py | intelmq/tests/bots/experts/fqdn2ip/__init__.py | intelmq/tests/bots/experts/fqdn2ip/__init__.py | Python | 0.000429 | @@ -0,0 +1 @@
+%0A
|
|
ce5ca3ac3268af331150f66865072a049869b3b2 | add abstraction magics | abstraction.py | abstraction.py | Python | 0.000088 | @@ -0,0 +1,1153 @@
+%22%22%22%0Aabstraction magics%0A%0Alet's you turn a cell into a function%0A%0AIn %5B1%5D: plot(x, f(y))%0A ...: xlabel('x')%0A ...: ylabel('y')%0A%0AIn %5B2%5D: %25functionize 1%0A%22%22%22%0Afrom IPython.utils.text import indent%0A%0Adef parse_ranges(s):%0A blocks = s.split(',')%0A ranges = %5B%5D%0A for block in blocks:%0A if '-' in block:%0A start, stop = %5B int(b) for b in block.split('-') %5D%0A stop = stop + 1 # be inclusive?%0A else:%0A start = int(block)%0A stop = start + 1%0A ranges.append((start, stop))%0A return ranges%0A%0Adef functionize(line):%0A shell = get_ipython()%0A splits = line.split(' ', 1)%0A range_str = splits%5B0%5D%0A args = splits%5B1%5D if len(splits) %3E 1 else ''%0A %0A ranges = parse_ranges(range_str)%0A get_range = shell.history_manager.get_range%0A %0A blocks = %5B%22def cell_function(%25s):%22 %25 args%5D%0A for start, stop in ranges:%0A cursor = get_range(0, start, stop)%0A for session_id, cell_id, code in cursor:%0A blocks.append(indent(code))%0A %0A code = '%5Cn'.join(blocks)%0A shell.set_next_input(code)%0A%0A%0Adef load_ipython_extension(ip):%0A ip.magics_manager.register_function(functionize)
|
|
9af5c4e79234a47ac26e5d1890e70f741363b18a | Create factorise_test.py | factorise_test.py | factorise_test.py | Python | 0.000001 | @@ -0,0 +1 @@
+%0A
|
|
425a8e26d371038f6ebf7c80dd7faea0f1dd906e | Add base test for admin endpoints [WAL-883] | nodeconductor/core/tests/unittests/test_admin.py | nodeconductor/core/tests/unittests/test_admin.py | Python | 0 | @@ -0,0 +1,1918 @@
+from django.contrib import admin%0Afrom django.contrib.auth import get_user_model%0Afrom django.test import TestCase%0Afrom django.urls import reverse%0A%0A%0AUser = get_user_model()%0A%0A%0Aclass TestAdminEndpoints(TestCase):%0A%0A def setUp(self):%0A user, _ = User.objects.get_or_create(username='username', is_staff=True)%0A self.client.force_login(user)%0A self.admin_site_name = admin.site.name%0A%0A def _reverse_url(self, path):%0A return reverse('%25s:%25s' %25 (self.admin_site_name, path))%0A%0A def test_app_list_ulrs_can_be_queried(self):%0A app_list_urls = dict()%0A for model in admin.site._registry:%0A app_list_url = reverse('%25s:%25s' %25 (self.admin_site_name, 'app_list'), args=(model._meta.app_label,))%0A app_list_urls.update(%7Bmodel._meta.app_label: app_list_url%7D)%0A%0A for url in app_list_urls.values():%0A response = self.client.get(url)%0A self.assertEqual(response.status_code, 200)%0A%0A def test_base_admin_site_urls_can_be_queried(self):%0A pages = %5B'index', 'login', 'logout', 'password_change', 'password_change_done', 'jsi18n'%5D%0A for name in pages:%0A url = self._reverse_url(name)%0A response = self.client.get(url)%0A self.assertIn(response.status_code, %5B200, 302%5D)%0A%0A def test_changelist_urls_can_be_queried(self):%0A for model in admin.site._registry:%0A url = self._reverse_url('%25s_%25s_changelist' %25 (model._meta.app_label, model._meta.model_name))%0A response = self.client.get(url)%0A self.assertEqual(response.status_code, 200)%0A%0A def test_add_urls_can_be_queried(self):%0A for model in admin.site._registry:%0A model_fullname = '%25s_%25s' %25 (model._meta.app_label, model._meta.model_name)%0A url = self._reverse_url('%25s_add' %25 model_fullname)%0A response = self.client.get(url)%0A self.assertIn(response.status_code, %5B200, 403%5D)%0A
|