repo_name stringlengths 7-94 | repo_path stringlengths 4-237 | repo_head_hexsha stringlengths 40-40 | content stringlengths 10-680k | apis stringlengths 2-840k |
---|---|---|---|---|
snowxmas/alipay-sdk-python-all | alipay/aop/api/domain/KbAdvertSettleBillResponse.py | 96870ced60facd96c5bce18d19371720cbda3317 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbAdvertSettleBillResponse(object):
def __init__(self):
self._download_url = None
self._paid_date = None
@property
def download_url(self):
return self._download_url
@download_url.setter
def download_url(self, value):
self._download_url = value
@property
def paid_date(self):
return self._paid_date
@paid_date.setter
def paid_date(self, value):
self._paid_date = value
def to_alipay_dict(self):
params = dict()
if self.download_url:
if hasattr(self.download_url, 'to_alipay_dict'):
params['download_url'] = self.download_url.to_alipay_dict()
else:
params['download_url'] = self.download_url
if self.paid_date:
if hasattr(self.paid_date, 'to_alipay_dict'):
params['paid_date'] = self.paid_date.to_alipay_dict()
else:
params['paid_date'] = self.paid_date
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbAdvertSettleBillResponse()
if 'download_url' in d:
o.download_url = d['download_url']
if 'paid_date' in d:
o.paid_date = d['paid_date']
return o
| [] |
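A short usage sketch for the response class above (added for illustration, not part of the SDK file; the URL and date values are placeholders):

```python
# Hypothetical values; in practice these fields come from an Alipay API response.
resp = KbAdvertSettleBillResponse()
resp.download_url = "https://example.com/settle_bill.csv"
resp.paid_date = "2019-01-31"

d = resp.to_alipay_dict()
# {'download_url': 'https://example.com/settle_bill.csv', 'paid_date': '2019-01-31'}

roundtrip = KbAdvertSettleBillResponse.from_alipay_dict(d)
assert roundtrip.download_url == resp.download_url
```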
PLCoster/adventofcode2019 | day5.py | 7aad1503dcf80b127b21191850ad9c93f91a602a | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 11:06:59 2019
@author: Paul
"""
def read_data(filename):
"""
Reads a csv file into a flat list and converts each entry to an int.
"""
data = []
f = open(filename, 'r')
for line in f:
data += line.strip('\n').split(',')
int_data = [int(i) for i in data]
f.close()
return int_data
def run_intcode(program, input_int):
"""
Takes program, a list of ints to run the Intcode machine on, and a single input integer.
Returns the final memory (list of ints) and the last output value after the Intcode program has been run.
The Intcode program reads the integers sequentially in groups of up to 4:
data[i] == Parameter Mode + Opcode (last two digits)
data[i+1] == Entry 1
data[i+2] == Entry 2
data[i+3] == Entry 3
If Opcode == 1, the values referenced by entries 1 and 2
are summed and stored at the index location given by entry 3.
If Opcode == 2, the values referenced by entries 1 and 2
are multiplied and stored at the index location given by entry 3.
If Opcode == 3, the single integer (the input) is saved to the position given
by entry 1.
If Opcode == 4, the program outputs the value of its only parameter. E.g. 4,50
would output the value at address 50.
If Opcode == 5 and entry 1 is != 0, the instruction pointer moves to the index given
by entry 2. Otherwise execution continues with the next instruction.
If Opcode == 6 and entry 1 is 0, the instruction pointer moves to the index given
by entry 2. Otherwise execution continues with the next instruction.
If Opcode == 7 and entry 1 < entry 2, store 1 in the position given by the third param,
otherwise store 0 at the position given by the third param.
If Opcode == 8 and entry 1 == entry 2, store 1 in the position given by the third param,
otherwise store 0 at the position given by the third param.
If Opcode == 99, the program is completed and will stop running.
Parameter modes are the digits to the left of the two-digit opcode; the hundreds digit
applies to the first parameter, the thousands digit to the second, and so on:
Mode 0 -> Position mode - the entry is treated as an index location
Mode 1 -> Immediate mode - the entry is treated as a value
"""
data = program[:]
answer = -1
params = [0, 0, 0]
param_modes = ['', '', '']
i = 0
while (i < len(program)):
#print("i = ", i)
# Determine Opcode and parameter codes:
opcode_str = "{:0>5d}".format(data[i])
opcode = int(opcode_str[3:])
param_modes[0] = opcode_str[2]
param_modes[1] = opcode_str[1]
param_modes[2] = opcode_str[0]
#print(opcode_str)
for j in range(2):
if param_modes[j] == '0':
try:
params[j] = data[data[i+j+1]]
except IndexError:
continue
else:
try:
params[j] = data[i+j+1]
except IndexError:
continue
#print(params, param_modes)
# If opcode is 1, add relevant entries:
if opcode == 1:
data[data[i+3]] = params[0] + params[1]
i += 4;
# If opcode is 2, multiply the relevant entries:
elif opcode == 2:
data[data[i+3]] = params[0] * params[1]
i += 4;
# If opcode is 3, store input value at required location.
elif opcode == 3:
data[data[i+1]] = input_int
i += 2;
# If opcode is 4, print out the input stored at specified location.
elif opcode == 4:
answer = data[data[i+1]]
print("Program output: ", data[data[i+1]])
i += 2;
# If the opcode is 5 and the next parameter !=0, jump forward
elif opcode == 5:
if params[0] != 0:
i = params[1]
else:
i += 3
# If the opcode is 6 and next parameter is 0, jump forward
elif opcode == 6:
if params[0] == 0:
i = params[1]
else:
i += 3
# If the opcode is 7, carry out less than comparison and store 1/0 at loc 3
elif opcode == 7:
if params[0] < params[1]:
data[data[i+3]] = 1
else:
data[data[i+3]] = 0
i += 4
# If the opcode is 8, carry out equality comparison and store 1/0 at loc 3
elif opcode == 8:
if params[0] == params[1]:
data[data[i+3]] = 1
else:
data[data[i+3]] = 0
i += 4
# If the opcode is 99, halt the intcode
elif opcode == 99:
print("Program ended by halt code")
break
# If opcode is anything else something has gone wrong!
else:
print("Problem with the Program")
break
return data, answer
program = read_data("day5input.txt")
#print(program)
result1, answer1 = run_intcode(program, 1)
#print(result1)
print("Part 1: Answer is: ", answer1)
result2, answer2 = run_intcode(program, 5)
#print(result2)
print("Part 2: Answer is: ", answer2)
#test_program = [1002,4,3,4,33]
#test_program2 = [3,0,4,0,99]
#test_program3 = [1101,100,-1,4,0]
#test_program4 = [3,9,8,9,10,9,4,9,99,-1,8] # 1 if input = 8, 0 otherwise
#test_program5 = [3,9,7,9,10,9,4,9,99,-1,8] # 1 if input < 8, 0 otherwise
#test_program6 = [3,3,1108,-1,8,3,4,3,99] # 1 if input = 8, 0 otherwise
#test_program7 = [3,3,1107,-1,8,3,4,3,99] # 1 if input < 8, 0 otherwise
#test_program8 = [3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9] # 0 if input = 0, 1 otherwise
#test_program9 = [3,3,1105,-1,9,1101,0,0,12,4,12,99,1] # 0 if input = 0, 1 otherwise
#test_program10 = [3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,
#36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,
#1105,1,46,98,99] # 999 if input < 8, 1000 if input = 8, 1001 if input > 8
| [] |
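A worked example (added here for clarity, not part of the original solution) of how run_intcode above decodes an instruction, using the first commented test program:

```python
# Decode the first instruction of test_program = [1002, 4, 3, 4, 33]:
opcode_str = "{:0>5d}".format(1002)   # "01002"
opcode = int(opcode_str[3:])          # 2  -> multiply
first_mode = opcode_str[2]            # '0' -> position mode
second_mode = opcode_str[1]           # '1' -> immediate mode
third_mode = opcode_str[0]            # '0' -> writes are positional anyway
# The machine multiplies data[data[1]] (= data[4] = 33) by the literal 3,
# stores the result 99 at index data[3] = 4, then reads 99 at i = 4 and halts.
```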
scclab/textvisdrg-prototype | textvis/textprizm/models.py | e912e4441b0e42e0f6c477edd03227b93b8ace73 | from django.db import models
# Create your models here.
class Schema(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
class Code(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
active_instances = models.PositiveIntegerField(default=0)
schema = models.ForeignKey(Schema, related_name="codes")
code_type = models.IntegerField(default=0)
def __unicode__(self):
if self.description:
return "%s/%s (%d): %s" % (self.schema_id, self.name, self.id, self.description)
else:
return "%s/%s (%d)" % (self.schema_id, self.name, self.id)
class DataSet(models.Model):
name = models.CharField(max_length=100)
created = models.DateTimeField()
class Session(models.Model):
set = models.ForeignKey(DataSet)
started = models.DateTimeField()
ended = models.DateTimeField()
def __unicode__(self):
return "%d (%s - %s)" % (self.id, str(self.started), str(self.ended))
class Participant(models.Model):
name = models.CharField(max_length=100)
description = models.TextField()
def __unicode__(self):
return self.name
class Message(models.Model):
session = models.ForeignKey(Session)
idx = models.IntegerField()
time = models.DateTimeField()
type = models.IntegerField()
participant = models.ForeignKey(Participant, related_name='messages')
message = models.TextField()
codes = models.ManyToManyField(Code, through='CodeInstance')
@classmethod
def get_between(cls, start, end):
"""
Get messages that are inclusively between the two messages, or two dates.
Takes into account the exact ordering of messages within a session,
so you won't get messages that share a timestamp with the last message
but come after it in the session, for example.
if isinstance(start, Message):
after_first = ~models.Q(session=start.session) | models.Q(idx__gte=start.idx)
after_first = models.Q(time__gte=start.time) & after_first
else:
after_first = models.Q(time__gte=start)
if isinstance(end, Message):
before_last = ~models.Q(session=end.session) | models.Q(idx__lte=end.idx)
before_last = models.Q(time__lte=end.time) & before_last
else:
before_last = models.Q(time__lte=end)
return cls.objects.filter(after_first, before_last)
@property
def text(self):
return self.message
@property
def user_name(self):
return self.participant.name
@property
def created_at(self):
return self.time
class User(models.Model):
name = models.CharField(max_length=100)
full_name = models.CharField(max_length=250)
email = models.CharField(max_length=250)
def __unicode__(self):
return self.name
class AbstractCodeInstance(models.Model):
class Meta:
abstract = True
code = models.ForeignKey(Code)
message = models.ForeignKey(Message)
added = models.DateTimeField()
class CodeInstance(AbstractCodeInstance):
user = models.ForeignKey(User)
task_id = models.PositiveIntegerField()
intensity = models.FloatField()
flag = models.IntegerField()
| [((6, 11, 6, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((7, 18, 7, 36), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import models\n'), ((12, 11, 12, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((13, 18, 13, 36), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import models\n'), ((14, 23, 14, 61), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import models\n'), ((15, 13, 15, 60), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((16, 16, 16, 46), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((27, 11, 27, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((28, 14, 28, 36), 'django.db.models.DateTimeField', 'models.DateTimeField', ({}, {}), '()', False, 'from django.db import models\n'), ((33, 10, 33, 36), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(33, 28, 33, 35): 'DataSet'}, {}), '(DataSet)', False, 'from django.db import models\n'), ((34, 14, 34, 36), 'django.db.models.DateTimeField', 'models.DateTimeField', ({}, {}), '()', False, 'from django.db import models\n'), ((35, 12, 35, 34), 'django.db.models.DateTimeField', 'models.DateTimeField', ({}, {}), '()', False, 'from django.db import models\n'), ((42, 11, 42, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((43, 18, 43, 36), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import models\n'), ((50, 14, 50, 40), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(50, 32, 50, 39): 'Session'}, {}), '(Session)', False, 'from django.db import models\n'), ((51, 10, 51, 31), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((52, 11, 52, 33), 'django.db.models.DateTimeField', 'models.DateTimeField', ({}, {}), '()', False, 'from django.db import models\n'), ((53, 11, 53, 32), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((54, 18, 54, 73), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((55, 14, 55, 32), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import models\n'), ((57, 12, 57, 64), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import models\n'), ((96, 11, 96, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((97, 16, 97, 48), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((98, 12, 98, 44), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((107, 11, 107, 34), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(107, 29, 107, 33): 'Code'}, {}), '(Code)', False, 'from django.db import models\n'), ((108, 14, 108, 40), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(108, 32, 108, 39): 'Message'}, {}), '(Message)', False, 'from django.db import models\n'), ((109, 12, 109, 34), 'django.db.models.DateTimeField', 'models.DateTimeField', ({}, {}), '()', False, 'from 
django.db import models\n'), ((114, 11, 114, 34), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(114, 29, 114, 33): 'User'}, {}), '(User)', False, 'from django.db import models\n'), ((115, 14, 115, 43), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((116, 16, 116, 35), 'django.db.models.FloatField', 'models.FloatField', ({}, {}), '()', False, 'from django.db import models\n'), ((117, 11, 117, 32), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((72, 26, 72, 51), 'django.db.models.Q', 'models.Q', (), '', False, 'from django.db import models\n'), ((78, 26, 78, 49), 'django.db.models.Q', 'models.Q', (), '', False, 'from django.db import models\n'), ((69, 61, 69, 89), 'django.db.models.Q', 'models.Q', (), '', False, 'from django.db import models\n'), ((70, 26, 70, 56), 'django.db.models.Q', 'models.Q', (), '', False, 'from django.db import models\n'), ((75, 59, 75, 85), 'django.db.models.Q', 'models.Q', (), '', False, 'from django.db import models\n'), ((76, 26, 76, 54), 'django.db.models.Q', 'models.Q', (), '', False, 'from django.db import models\n'), ((69, 27, 69, 58), 'django.db.models.Q', 'models.Q', (), '', False, 'from django.db import models\n'), ((75, 27, 75, 56), 'django.db.models.Q', 'models.Q', (), '', False, 'from django.db import models\n')] |
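A hedged usage sketch for Message.get_between defined above (not in the original models.py; the dates and primary keys are placeholders):

```python
import datetime

# Between two dates:
start = datetime.datetime(2014, 1, 1)
end = datetime.datetime(2014, 2, 1)
january_messages = Message.get_between(start, end)

# Between two existing messages, which also respects the per-session
# idx ordering handled inside get_between:
# first = Message.objects.get(pk=1)     # placeholder pk
# last = Message.objects.get(pk=500)    # placeholder pk
# span = Message.get_between(first, last)
```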
jacob22/accounting | test/test_purchasing.py | e2fceea880e3f056703ba97b6cf52b73cd7af93b | # -*- coding: utf-8 -*-
# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if (sys.version_info >=(3, 0)):
PYT3 = True
import urllib.request
import urllib.parse
else:
PYT3 = False
import urllib2
import urlparse
import contextlib
import json
import os
import py
import subprocess
import time
import uuid
from . import support
here = os.path.dirname(__file__)
class Container(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def do_purchase(products, emailaddress):
params = {
'data': [
{'items': [{'product': product} for product in products],
'buyerName': 'Kalle Anka',
'buyerEmail': emailaddress}
]
}
if PYT3:
req = urllib.request.Request(urllib.parse.urljoin(support.url, '/rest/purchase'),
json.dumps(params).encode('ascii'),
{'Content-Type': 'application/json'})
data = json.load(urllib.request.urlopen(req))
else:
req = urllib2.Request(urlparse.urljoin(support.url, '/rest/purchase'),
json.dumps(params),
{'Content-Type': 'application/json'})
data = json.load(urllib2.urlopen(req))
return Container(id=data['purchase'],
invoice=data['invoiceUrl'],
buyerEmail=emailaddress)
def check_mail(client, mailssh, purchase, mailtype):
client.run('sendmail -qf')
message, = mailssh.find_and_delete_mail(None, 'TO', purchase.buyerEmail)
msg, headers = mailssh.parse(message)
assert headers['X-OE-MailType'] == [mailtype]
assert purchase.invoice in msg
return msg, headers
@contextlib.contextmanager
def check_mails(client, mailssh, purchase):
check_mail(client, mailssh, purchase, 'order-confirmation')
yield
check_mail(client, mailssh, purchase, 'full-payment-confirmation')
def gen_pg(client, org, id_args=[1, 1]):
cmd = 'python /root/accounting/members/paymentgen.py %s %s %s' % (
org.id, id_args[0], id_args[1])
id_args[0] += 1
id_args[1] += 1000
stdin, stdout, stderr = client.exec_command('PYTHONPATH=/root/accounting ' +
cmd)
return stdout.read()
def upload_pg(tmpdir, ssh, pgdata):
pgfile = tmpdir.join('pgfile')
pgfile.write(pgdata)
dest = uuid.uuid4()
with ssh(username='nordea') as client:
sftp = client.open_sftp()
sftp.put(str(pgfile), 'incoming/%s' % dest, confirm=False)
@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
'ssh', 'org', 'emailaddress')
def test_full_plusgiro_payment(mailssh, ssh, org, emailaddress, tmpdir):
purchase = do_purchase([org.product], emailaddress)
with ssh() as client:
with check_mails(client, mailssh, purchase):
pgdata = gen_pg(client, org)
upload_pg(tmpdir, ssh, pgdata)
@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
'ssh', 'org', 'emailaddress')
def test_partial_plusgiro_payment(ssh, mailssh, org, emailaddress,
tmpdir):
purchase = do_purchase([org.product], emailaddress)
with ssh() as client:
with check_mails(client, mailssh, purchase):
pgdata1 = gen_pg(client, org)
pgdata2 = gen_pg(client, org)
pgdata3 = gen_pg(client, org)
# The sum is 66666 (öre). It is probably unique in the fake pgfile,
# so we can simply replace it in order to make partial payments.
if PYT3:
partial_payment1 = pgdata1.replace(b'66666', b'22222') # pay 222.22 SEK
partial_payment2 = pgdata2.replace(b'66666', b'33333') # pay 333.33 SEK
final_payment = pgdata3.replace(b'66666', b'11111') # final 111.11 SEK
else:
partial_payment1 = pgdata1.replace('66666', '22222') # pay 222.22 SEK
partial_payment2 = pgdata2.replace('66666', '33333') # pay 333.33 SEK
final_payment = pgdata3.replace('66666', '11111') # final 111.11 SEK
upload_pg(tmpdir, ssh, partial_payment1)
msg, headers = check_mail(client, mailssh, purchase,
'partial-payment-confirmation')
assert '222,22' in msg # amount paid
assert '444,44' in msg # amount remaining
upload_pg(tmpdir, ssh, partial_payment2)
msg, headers = check_mail(client, mailssh, purchase,
'partial-payment-confirmation')
assert '333,33' in msg # amount paid
assert '111,11' in msg # amount remaining
upload_pg(tmpdir, ssh, final_payment)
@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
'nodes', 'ssh', 'org', 'emailaddress')
def test_swish_payment(nodes, ssh, mailssh, org, emailaddress):
#py.test.skip('Skip swish tests until certificates work')
purchase = do_purchase([org.product], emailaddress)
with ssh() as client:
with check_mails(client, mailssh, purchase):
print(purchase.invoice)
if PYT3:
parsed = urllib.parse.urlparse(purchase.invoice)
_, _, purchase, _ = parsed.path.split('/')
path = '/providers/swish/charge/%s/%s' % (org.swish_provider, purchase)
url = urllib.parse.urlunparse((parsed.scheme, parsed.netloc, path,
'', '', ''))
data = {'phone': '1231181189'}
req = urllib.request.Request(url, json.dumps(data).encode('ascii'),
{'Content-Type': 'application/json'})
response = json.load(urllib.request.urlopen(req))
else:
parsed = urlparse.urlparse(purchase.invoice)
_, _, purchase, _ = parsed.path.split('/')
path = '/providers/swish/charge/%s/%s' % (org.swish_provider, purchase)
url = urlparse.urlunparse((parsed.scheme, parsed.netloc, path,
'', '', ''))
data = {'phone': '1231181189'}
req = urllib2.Request(url, json.dumps(data),
{'Content-Type': 'application/json'})
response = json.load(urllib2.urlopen(req))
print(response)
assert response['status'] == 'CREATED'
path = '/providers/swish/poll/%s/%s' % (org.swish_provider,
response['id'])
if PYT3:
url = urllib.parse.urlunparse((parsed.scheme, parsed.netloc, path,
'', '', ''))
else:
url = urlparse.urlunparse((parsed.scheme, parsed.netloc, path,
'', '', ''))
for _ in range(20):
if PYT3:
req = urllib.request.Request(url)
response = json.load(urllib.request.urlopen(req))
else:
req = urllib2.Request(url)
response = json.load(urllib2.urlopen(req))
print(response)
if response['status'] == 'PAID':
break
time.sleep(1)
| [((37, 7, 37, 32), 'os.path.dirname', 'os.path.dirname', ({(37, 23, 37, 31): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((106, 1, 107, 55), 'py.test.mark.usefixtures', 'py.test.mark.usefixtures', ({(106, 26, 106, 35): '"""cluster"""', (106, 37, 106, 47): '"""clean_db"""', (106, 49, 106, 63): '"""bootstrapped"""', (106, 65, 106, 74): '"""mailssh"""', (107, 26, 107, 31): '"""ssh"""', (107, 33, 107, 38): '"""org"""', (107, 40, 107, 54): '"""emailaddress"""'}, {}), "('cluster', 'clean_db', 'bootstrapped', 'mailssh',\n 'ssh', 'org', 'emailaddress')", False, 'import py\n'), ((116, 1, 117, 55), 'py.test.mark.usefixtures', 'py.test.mark.usefixtures', ({(116, 26, 116, 35): '"""cluster"""', (116, 37, 116, 47): '"""clean_db"""', (116, 49, 116, 63): '"""bootstrapped"""', (116, 65, 116, 74): '"""mailssh"""', (117, 26, 117, 31): '"""ssh"""', (117, 33, 117, 38): '"""org"""', (117, 40, 117, 54): '"""emailaddress"""'}, {}), "('cluster', 'clean_db', 'bootstrapped', 'mailssh',\n 'ssh', 'org', 'emailaddress')", False, 'import py\n'), ((153, 1, 154, 64), 'py.test.mark.usefixtures', 'py.test.mark.usefixtures', ({(153, 26, 153, 35): '"""cluster"""', (153, 37, 153, 47): '"""clean_db"""', (153, 49, 153, 63): '"""bootstrapped"""', (153, 65, 153, 74): '"""mailssh"""', (154, 26, 154, 33): '"""nodes"""', (154, 35, 154, 40): '"""ssh"""', (154, 42, 154, 47): '"""org"""', (154, 49, 154, 63): '"""emailaddress"""'}, {}), "('cluster', 'clean_db', 'bootstrapped', 'mailssh',\n 'nodes', 'ssh', 'org', 'emailaddress')", False, 'import py\n'), ((100, 11, 100, 23), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((60, 30, 60, 77), 'urlparse.urljoin', 'urlparse.urljoin', ({(60, 47, 60, 58): 'support.url', (60, 60, 60, 76): '"""/rest/purchase"""'}, {}), "(support.url, '/rest/purchase')", False, 'import urlparse\n'), ((61, 34, 61, 52), 'json.dumps', 'json.dumps', ({(61, 45, 61, 51): 'params'}, {}), '(params)', False, 'import json\n'), ((63, 25, 63, 45), 'urllib2.urlopen', 'urllib2.urlopen', ({(63, 41, 63, 44): 'req'}, {}), '(req)', False, 'import urllib2\n'), ((172, 25, 172, 60), 'urlparse.urlparse', 'urlparse.urlparse', ({(172, 43, 172, 59): 'purchase.invoice'}, {}), '(purchase.invoice)', False, 'import urlparse\n'), ((175, 22, 176, 55), 'urlparse.urlunparse', 'urlparse.urlunparse', ({(175, 42, 176, 54): "(parsed.scheme, parsed.netloc, path, '', '', '')"}, {}), "((parsed.scheme, parsed.netloc, path, '', '', ''))", False, 'import urlparse\n'), ((190, 22, 191, 55), 'urlparse.urlunparse', 'urlparse.urlunparse', ({(190, 42, 191, 54): "(parsed.scheme, parsed.netloc, path, '', '', '')"}, {}), "((parsed.scheme, parsed.netloc, path, '', '', ''))", False, 'import urlparse\n'), ((204, 16, 204, 29), 'time.sleep', 'time.sleep', ({(204, 27, 204, 28): '(1)'}, {}), '(1)', False, 'import time\n'), ((56, 34, 56, 52), 'json.dumps', 'json.dumps', ({(56, 45, 56, 51): 'params'}, {}), '(params)', False, 'import json\n'), ((178, 43, 178, 59), 'json.dumps', 'json.dumps', ({(178, 54, 178, 58): 'data'}, {}), '(data)', False, 'import json\n'), ((180, 37, 180, 57), 'urllib2.urlopen', 'urllib2.urlopen', ({(180, 53, 180, 56): 'req'}, {}), '(req)', False, 'import urllib2\n'), ((198, 26, 198, 46), 'urllib2.Request', 'urllib2.Request', ({(198, 42, 198, 45): 'url'}, {}), '(url)', False, 'import urllib2\n'), ((199, 41, 199, 61), 'urllib2.urlopen', 'urllib2.urlopen', ({(199, 57, 199, 60): 'req'}, {}), '(req)', False, 'import urllib2\n'), ((168, 50, 168, 66), 'json.dumps', 'json.dumps', ({(168, 61, 168, 65): 'data'}, {}), '(data)', 
False, 'import json\n')] |
SENERGY-Platform/senergy-connector | iot/downstream/fog_processes.py | 7198f6b2ec08b3c09c53755f259a2711921fdcbe | """
Copyright 2020 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ("Router", )
from ..util import conf, get_logger, mqtt
import threading
import cc_lib
logger = get_logger(__name__.split(".", 1)[-1])
class Router(threading.Thread):
def __init__(self, client: cc_lib.client.Client, mqtt_client: mqtt.Client):
super().__init__(name="downstream-fog-processes-router", daemon=True)
self.__cc = client
self.__mqtt = mqtt_client
def run(self) -> None:
try:
while True:
envelope = self.__cc.receive_fog_processes()
logger.debug(envelope)
self.__mqtt.publish(
"{}/{}".format(conf.MQTTClient.fog_processes_pub_topic, envelope.sub_topic),
envelope.message,
qos=conf.MQTTClient.qos
)
except Exception as ex:
logger.error(ex)
| [] |
aliyaandabekova/DJANGO_PROJECT | django_project/user_profile/migrations/0003_order_payment_method.py | 7b94f80fa56acf936da014aa5d91da79457bf4eb | # Generated by Django 3.2.3 on 2021-05-27 13:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0002_auto_20210526_1747'),
]
operations = [
migrations.AddField(
model_name='order',
name='payment_method',
field=models.CharField(choices=[('cash', 'cash'), ('wallet', 'wallet')], default='cash', max_length=10),
),
]
| [((16, 18, 16, 115), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
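For reference, a hedged sketch (not part of the migration file) of the model field this AddField operation corresponds to; the Order model lives in user_profile.models in the original project, which is not shown here:

```python
# Assumed shape of the field on the Order model after this migration.
from django.db import models

PAYMENT_CHOICES = [('cash', 'cash'), ('wallet', 'wallet')]

class Order(models.Model):
    # ... other fields from migrations 0001/0002 are omitted (not shown in this dump) ...
    payment_method = models.CharField(
        max_length=10, choices=PAYMENT_CHOICES, default='cash')
```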
mpirnat/aoc2016 | day07/test.py | 1aec59aca01541d0d1c30f85d4668959c82fa35c | #!/usr/bin/env python
import unittest
from day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings
from day07 import supports_tls, count_tls_addresses
from day07 import find_abas, supports_ssl, count_ssl_addresses
class TestFindingABBASequences(unittest.TestCase):
cases = (
('abba', True),
('oxyyxo', True),
('aaaa', False),
('abcd', False),
)
def test_finds_abba_sequences(self):
for text, expected in self.cases:
self.assertEqual(has_abba(text), expected)
class TestGettingAllowedChunks(unittest.TestCase):
cases = (
('abba[mnop]qrst[abcd]defg', ['abba', 'qrst', 'defg']),
)
def test_finds_allowed_substrings(self):
for text, expected in self.cases:
self.assertEqual(get_abba_allowed_strings(text), expected)
class TestGettingDisallowedChunks(unittest.TestCase):
cases = (
('abba[mnop]qrst[abcd]defg', ['mnop', 'abcd']),
)
def test_finds_disallowed_substrings(self):
for text, expected in self.cases:
self.assertEqual(get_abba_disallowed_strings(text), expected)
class TestCheckingTLSAddresses(unittest.TestCase):
cases = (
('abba[mnop]qrst', True),
('abcd[bddb]xyyx', False),
('aaaa[qwer]tyui', False),
('ioxxoj[asdfgh]zxcvbn', True),
)
def test_finds_tls_addresses(self):
for text, expected in self.cases:
self.assertEqual(supports_tls(text), expected)
def test_counts_tls_addresses(self):
data = [x[0] for x in self.cases]
self.assertEqual(count_tls_addresses(data), 2)
class TestFindingABASequences(unittest.TestCase):
cases = (
('aba', ['aba']),
('xyxxyx', ['xyx']),
('aaakekeke', ['eke', 'kek']),
('zazbzbzbcdb', ['bzb', 'zaz', 'zbz']),
)
def test_finds_aba_sequences(self):
for text, expected in self.cases:
self.assertEqual(find_abas(text), expected)
class TestCheckingSSLAddresses(unittest.TestCase):
cases = (
('aba[bab]xyz', True),
('xyx[xyx]xyx', False),
('aaa[kek]eke', True),
('zazbz[bzb]cdb', True),
)
def test_finds_ssl_addresses(self):
for text, expected in self.cases:
self.assertEqual(supports_ssl(text), expected)
def test_counts_ssl_addresses(self):
data = [x[0] for x in self.cases]
self.assertEqual(count_ssl_addresses(data), 3)
if __name__ == '__main__':
unittest.main()
| [((90, 4, 90, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((56, 25, 56, 50), 'day07.count_tls_addresses', 'count_tls_addresses', ({(56, 45, 56, 49): 'data'}, {}), '(data)', False, 'from day07 import supports_tls, count_tls_addresses\n'), ((86, 25, 86, 50), 'day07.count_ssl_addresses', 'count_ssl_addresses', ({(86, 45, 86, 49): 'data'}, {}), '(data)', False, 'from day07 import find_abas, supports_ssl, count_ssl_addresses\n'), ((19, 29, 19, 43), 'day07.has_abba', 'has_abba', ({(19, 38, 19, 42): 'text'}, {}), '(text)', False, 'from day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings\n'), ((29, 29, 29, 59), 'day07.get_abba_allowed_strings', 'get_abba_allowed_strings', ({(29, 54, 29, 58): 'text'}, {}), '(text)', False, 'from day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings\n'), ((39, 29, 39, 62), 'day07.get_abba_disallowed_strings', 'get_abba_disallowed_strings', ({(39, 57, 39, 61): 'text'}, {}), '(text)', False, 'from day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings\n'), ((52, 29, 52, 47), 'day07.supports_tls', 'supports_tls', ({(52, 42, 52, 46): 'text'}, {}), '(text)', False, 'from day07 import supports_tls, count_tls_addresses\n'), ((69, 29, 69, 44), 'day07.find_abas', 'find_abas', ({(69, 39, 69, 43): 'text'}, {}), '(text)', False, 'from day07 import find_abas, supports_ssl, count_ssl_addresses\n'), ((82, 29, 82, 47), 'day07.supports_ssl', 'supports_ssl', ({(82, 42, 82, 46): 'text'}, {}), '(text)', False, 'from day07 import find_abas, supports_ssl, count_ssl_addresses\n')] |
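The implementation module day07.py is not included in this dump; the sketch below is one possible has_abba consistent with the test cases above, added only to illustrate the ABBA rule:

```python
def has_abba(text):
    """True if text contains a pattern xyyx with x != y (e.g. 'abba', 'oxyyxo')."""
    return any(
        a == d and b == c and a != b
        for a, b, c, d in zip(text, text[1:], text[2:], text[3:])
    )

assert has_abba('abba') and has_abba('oxyyxo')
assert not has_abba('aaaa') and not has_abba('abcd')
```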
HTRPOCODES/HTRPO-v2 | rlnets/PG.py | 7e085e8077e6caa38d192bbd33b41c49b36ad6a6 | import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from basenets.MLP import MLP
from basenets.Conv import Conv
from torch import nn
class FCPG_Gaussian(MLP):
def __init__(self,
n_inputfeats,
n_actions,
sigma,
n_hiddens = [30],
nonlinear = F.tanh,
usebn = False,
outactive = None,
outscaler = None,
initializer = "orthogonal",
initializer_param = {"gain":np.sqrt(2), "last_gain": 0.1}
):
self.n_actions = n_actions
super(FCPG_Gaussian, self).__init__(
n_inputfeats, # input dim
n_actions, # output dim
n_hiddens, # hidden unit number list
nonlinear,
usebn,
outactive,
outscaler,
initializer,
initializer_param=initializer_param,
)
self.logstd = nn.Parameter(torch.log(sigma * torch.ones(n_actions) + 1e-8))
def forward(self,x, other_data = None):
x = MLP.forward(self, x, other_data)
# for exploration, we need to make sure that the std is not too low.
logstd = torch.clamp(self.logstd, min = np.log(0.1))
return x, logstd.expand_as(x), torch.exp(logstd).expand_as(x)
def cuda(self, device = None):
self.logstd.cuda()
return self._apply(lambda t: t.cuda(device))
class FCPG_Softmax(MLP):
def __init__(self,
n_inputfeats, # input dim
n_actions, # output dim
n_hiddens = [10], # hidden unit number list
nonlinear = F.tanh,
usebn = False,
outactive = F.softmax,
outscaler = None,
initializer = "orthogonal",
initializer_param = {"gain":np.sqrt(2), "last_gain": 0.1}
):
self.n_actions = n_actions
super(FCPG_Softmax, self).__init__(
n_inputfeats, # input dim
n_actions, # output dim
n_hiddens, # hidden unit number list
nonlinear,
usebn,
outactive,
outscaler,
initializer,
initializer_param=initializer_param,
)
def forward(self, x, other_data=None):
x = MLP.forward(self, x, other_data)
# for exploration, and similar to e-greedy
x = x + 0.01 / self.n_actions
x = x / torch.sum(x, dim = -1, keepdim=True).detach()
return x
class ConvPG_Softmax(Conv):
def __init__(self,
n_inputfeats, # input dim
n_actions, # output dim
k_sizes = [8, 4, 3],
channels = [8, 16, 16],
strides = [4, 2, 2],
fcs = [32, 32, 32], # hidden unit number list
nonlinear = F.relu,
usebn = False,
outactive = F.softmax,
outscaler = None,
initializer="xavier",
initializer_param={}
):
self.n_actions = n_actions
super(ConvPG_Softmax, self).__init__(
n_inputfeats, # input dim
n_actions, # output dim
k_sizes,
channels,
strides,
fcs,
nonlinear,
usebn,
outactive,
outscaler,
initializer,
initializer_param=initializer_param,
)
def forward(self, x, other_data=None):
x = Conv.forward(self, x, other_data)
# for exploration, and similar to e-greedy
x = x + 0.01 / self.n_actions
x = x / torch.sum(x, dim=-1, keepdim=True).detach()
return x
# TODO: support multi-layer value function in which action is concat before the final layer
class FCVALUE(MLP):
def __init__(self,
n_inputfeats,
n_hiddens = [30],
nonlinear = F.tanh,
usebn = False,
outactive = None,
outscaler = None,
initializer="orthogonal",
initializer_param={"gain":np.sqrt(2), "last_gain": 0.1}
):
super(FCVALUE, self).__init__(
n_inputfeats,
1,
n_hiddens,
nonlinear,
usebn,
outactive,
outscaler,
initializer,
initializer_param=initializer_param,
)
| [((37, 12, 37, 44), 'basenets.MLP.MLP.forward', 'MLP.forward', ({(37, 24, 37, 28): 'self', (37, 30, 37, 31): 'x', (37, 33, 37, 43): 'other_data'}, {}), '(self, x, other_data)', False, 'from basenets.MLP import MLP\n'), ((72, 12, 72, 44), 'basenets.MLP.MLP.forward', 'MLP.forward', ({(72, 24, 72, 28): 'self', (72, 30, 72, 31): 'x', (72, 33, 72, 43): 'other_data'}, {}), '(self, x, other_data)', False, 'from basenets.MLP import MLP\n'), ((110, 12, 110, 45), 'basenets.Conv.Conv.forward', 'Conv.forward', ({(110, 25, 110, 29): 'self', (110, 31, 110, 32): 'x', (110, 34, 110, 44): 'other_data'}, {}), '(self, x, other_data)', False, 'from basenets.Conv import Conv\n'), ((20, 45, 20, 55), 'numpy.sqrt', 'np.sqrt', ({(20, 53, 20, 54): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((56, 45, 56, 55), 'numpy.sqrt', 'np.sqrt', ({(56, 53, 56, 54): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((126, 43, 126, 53), 'numpy.sqrt', 'np.sqrt', ({(126, 51, 126, 52): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((39, 48, 39, 59), 'numpy.log', 'np.log', ({(39, 55, 39, 58): '0.1'}, {}), '(0.1)', True, 'import numpy as np\n'), ((40, 39, 40, 56), 'torch.exp', 'torch.exp', ({(40, 49, 40, 55): 'logstd'}, {}), '(logstd)', False, 'import torch\n'), ((75, 16, 75, 52), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((113, 16, 113, 50), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((34, 53, 34, 74), 'torch.ones', 'torch.ones', ({(34, 64, 34, 73): 'n_actions'}, {}), '(n_actions)', False, 'import torch\n')] |
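A tiny numeric illustration (added, not part of the original file) of the exploration smoothing used in FCPG_Softmax.forward above: adding 0.01 / n_actions before renormalising keeps every action probability strictly positive, similar in spirit to epsilon-greedy.

```python
import torch

probs = torch.tensor([0.0, 0.2, 0.8])   # raw softmax output with a zero entry
n_actions = probs.shape[-1]

smoothed = probs + 0.01 / n_actions
smoothed = smoothed / smoothed.sum(dim=-1, keepdim=True)
# approximately tensor([0.0033, 0.2013, 0.7954]): no action has zero probability
```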
m4rkl1u/tensorflow | tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py | 90a8825c7ae9719e8969d45040b4155b0e7de130 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access
class SparseTensorsMapTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testAddTakeMany(self):
with self.session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
handles_concat = array_ops.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
combined_indices, combined_values, combined_shape = self.evaluate(sp_out)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testFeedAddTakeMany(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
sparse_handles = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
combined_indices, combined_values, combined_shape = self.evaluate(
sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testAddManyTakeManyRoundTrip(self):
with self.session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
self.assertAllEqual(roundtrip_value.dense_shape, shape_value)
def testDeserializeFailsInconsistentRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
handle_concat = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
self.evaluate(sp_roundtrip)
def testTakeManyFailsWrongInputOp(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
handle = add_sparse_to_tensors_map(input_val)
handle_value = self.evaluate(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
self.evaluate(sp_roundtrip)
class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
num_elements = 10000
batch_size = 64
indices_batch = np.random.randint(
batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
with session.Session(config=benchmark.benchmark_config()) as sess:
with ops.device("/cpu:0"):
indices = variables.Variable(indices)
values = variables.Variable(values)
shape = variables.Variable(shape)
st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = sparse_ops.serialize_many_sparse(st)
st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
variables.global_variables_initializer().run()
st_roundtrip_values = self.evaluate(st_roundtrip)
st_deserialized_values = self.evaluate(st_deserialized)
np.testing.assert_equal(st_roundtrip_values.values,
st_deserialized_values.values)
np.testing.assert_equal(st_roundtrip_values.indices,
st_deserialized_values.indices)
np.testing.assert_equal(st_roundtrip_values.dense_shape,
st_deserialized_values.dense_shape)
self.run_op_benchmark(
sess,
st_roundtrip_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
sess,
st_deserialized_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
test.main()
| [((238, 2, 238, 13), 'tensorflow.python.platform.test.main', 'test.main', ({}, {}), '()', False, 'from tensorflow.python.platform import test\n'), ((60, 11, 60, 63), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', ({(60, 47, 60, 50): 'ind', (60, 52, 60, 55): 'val', (60, 57, 60, 62): 'shape'}, {}), '(ind, val, shape)', True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((71, 11, 71, 63), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', ({(71, 47, 71, 50): 'ind', (71, 52, 71, 55): 'val', (71, 57, 71, 62): 'shape'}, {}), '(ind, val, shape)', True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((77, 11, 77, 63), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', ({(77, 47, 77, 50): 'ind', (77, 52, 77, 55): 'val', (77, 57, 77, 62): 'shape'}, {}), '(ind, val, shape)', True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((187, 4, 187, 23), 'numpy.random.seed', 'np.random.seed', ({(187, 19, 187, 22): '(127)'}, {}), '(127)', True, 'import numpy as np\n'), ((190, 20, 191, 54), 'numpy.random.randint', 'np.random.randint', (), '', True, 'import numpy as np\n'), ((192, 20, 192, 59), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((196, 12, 196, 66), 'numpy.asarray', 'np.asarray', (), '', True, 'import numpy as np\n'), ((48, 8, 48, 43), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ({(48, 30, 48, 42): 'dtypes.int64'}, {}), '(dtypes.int64)', False, 'from tensorflow.python.ops import array_ops\n'), ((49, 8, 49, 36), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ({(49, 30, 49, 35): 'dtype'}, {}), '(dtype)', False, 'from tensorflow.python.ops import array_ops\n'), ((49, 38, 49, 73), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ({(49, 60, 49, 72): 'dtypes.int64'}, {}), '(dtypes.int64)', False, 'from tensorflow.python.ops import array_ops\n'), ((86, 23, 86, 58), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', ({(86, 39, 86, 57): '[handle0, handle1]'}, {}), '([handle0, handle1])', False, 'from tensorflow.python.ops import array_ops\n'), ((111, 23, 112, 61), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (), '', False, 'from tensorflow.python.framework import ops\n'), ((131, 22, 131, 72), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((132, 21, 132, 49), 'numpy.array', 'np.array', ({(132, 30, 132, 48): "[b'a', b'b', b'c']"}, {}), "([b'a', b'b', b'c'])", True, 'import numpy as np\n'), ((133, 20, 133, 52), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((160, 22, 161, 61), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (), '', False, 'from tensorflow.python.framework import ops\n'), ((52, 10, 53, 28), 'numpy.array', 'np.array', ({(52, 19, 53, 27): '[[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]'}, {}), '([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])', True, 'import numpy as np\n'), ((54, 10, 54, 43), 'numpy.array', 'np.array', ({(54, 19, 54, 42): '[0, 10, 13, 14, 32, 33]'}, {}), '([0, 10, 13, 14, 32, 33])', True, 'import numpy as np\n'), ((59, 12, 59, 28), 'numpy.array', 'np.array', ({(59, 21, 59, 27): '[5, 6]'}, {}), '([5, 6])', True, 'import numpy as np\n'), ((63, 10, 64, 28), 'numpy.array', 'np.array', ({(63, 19, 64, 27): '[[0, 0], 
[1, 0], [1, 2], [1, 3], [2, 2], [2, 3]]'}, {}), '([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2], [2, 3]])', True, 'import numpy as np\n'), ((65, 10, 65, 43), 'numpy.array', 'np.array', ({(65, 19, 65, 42): '[0, 10, 13, 14, 32, 33]'}, {}), '([0, 10, 13, 14, 32, 33])', True, 'import numpy as np\n'), ((70, 12, 70, 28), 'numpy.array', 'np.array', ({(70, 21, 70, 27): '[3, 4]'}, {}), '([3, 4])', True, 'import numpy as np\n'), ((74, 10, 74, 31), 'numpy.array', 'np.array', ({(74, 19, 74, 30): '[[0, 0, 0]]'}, {}), '([[0, 0, 0]])', True, 'import numpy as np\n'), ((75, 10, 75, 23), 'numpy.array', 'np.array', ({(75, 19, 75, 22): '[0]'}, {}), '([0])', True, 'import numpy as np\n'), ((76, 12, 76, 31), 'numpy.array', 'np.array', ({(76, 21, 76, 30): '[3, 4, 5]'}, {}), '([3, 4, 5])', True, 'import numpy as np\n'), ((81, 46, 81, 58), 'numpy.arange', 'np.arange', ({(81, 56, 81, 57): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((82, 46, 82, 58), 'numpy.arange', 'np.arange', ({(82, 56, 82, 57): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((104, 47, 104, 59), 'numpy.arange', 'np.arange', ({(104, 57, 104, 58): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((105, 47, 105, 59), 'numpy.arange', 'np.arange', ({(105, 57, 105, 58): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((153, 47, 153, 59), 'numpy.arange', 'np.arange', ({(153, 57, 153, 58): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((173, 46, 173, 58), 'numpy.arange', 'np.arange', ({(173, 56, 173, 57): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((198, 11, 198, 31), 'tensorflow.python.framework.ops.device', 'ops.device', ({(198, 22, 198, 30): '"""/cpu:0"""'}, {}), "('/cpu:0')", False, 'from tensorflow.python.framework import ops\n'), ((199, 18, 199, 45), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', ({(199, 37, 199, 44): 'indices'}, {}), '(indices)', False, 'from tensorflow.python.ops import variables\n'), ((200, 17, 200, 43), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', ({(200, 36, 200, 42): 'values'}, {}), '(values)', False, 'from tensorflow.python.ops import variables\n'), ((201, 16, 201, 41), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', ({(201, 35, 201, 40): 'shape'}, {}), '(shape)', False, 'from tensorflow.python.ops import variables\n'), ((202, 13, 202, 67), 'tensorflow.python.framework.sparse_tensor.SparseTensor', 'sparse_tensor_lib.SparseTensor', ({(202, 44, 202, 51): 'indices', (202, 53, 202, 59): 'values', (202, 61, 202, 66): 'shape'}, {}), '(indices, values, shape)', True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((209, 24, 209, 60), 'tensorflow.python.ops.sparse_ops.serialize_many_sparse', 'sparse_ops.serialize_many_sparse', ({(209, 57, 209, 59): 'st'}, {}), '(st)', False, 'from tensorflow.python.ops import sparse_ops\n'), ((210, 26, 211, 46), 'tensorflow.python.ops.sparse_ops.deserialize_many_sparse', 'sparse_ops.deserialize_many_sparse', (), '', False, 'from tensorflow.python.ops import sparse_ops\n'), ((218, 8, 219, 62), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(218, 32, 218, 58): 'st_roundtrip_values.values', (219, 32, 219, 61): 'st_deserialized_values.values'}, {}), '(st_roundtrip_values.values, st_deserialized_values.\n values)', True, 'import numpy as np\n'), ((220, 8, 221, 63), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(220, 32, 220, 59): 'st_roundtrip_values.indices', (221, 32, 221, 62): 'st_deserialized_values.indices'}, {}), '(st_roundtrip_values.indices, st_deserialized_values\n 
.indices)', True, 'import numpy as np\n'), ((222, 8, 223, 67), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(222, 32, 222, 63): 'st_roundtrip_values.dense_shape', (223, 32, 223, 66): 'st_deserialized_values.dense_shape'}, {}), '(st_roundtrip_values.dense_shape,\n st_deserialized_values.dense_shape)', True, 'import numpy as np\n'), ((80, 28, 80, 39), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ({}, {}), '()', False, 'from tensorflow.python.framework import ops\n'), ((197, 32, 197, 60), 'tensorflow.python.platform.benchmark.benchmark_config', 'benchmark.benchmark_config', ({}, {}), '()', False, 'from tensorflow.python.platform import benchmark\n'), ((214, 8, 214, 48), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ({}, {}), '()', False, 'from tensorflow.python.ops import variables\n')] |
RULCSoft/cloudroast | cloudroast/objectstorage/smoke/object_smoke.py | 30f0e64672676c3f90b4a582fe90fac6621475b3 | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import calendar
import time
import zlib
from hashlib import md5
import unittest
from cafe.drivers.unittest.decorators import (
DataDrivenFixture, data_driven_test)
from cloudcafe.objectstorage.objectstorage_api.common.constants import \
Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cloudroast.objectstorage.generators import (
ObjectDatasetList, CONTENT_TYPES)
CONTAINER_DESCRIPTOR = 'object_smoke_test'
STATUS_CODE_MSG = ('{method} expected status code {expected}'
' received status code {received}')
@DataDrivenFixture
class ObjectSmokeTest(ObjectStorageFixture):
@classmethod
def setUpClass(cls):
super(ObjectSmokeTest, cls).setUpClass()
cls.default_obj_name = Constants.VALID_OBJECT_NAME_WITH_UNICODE
@staticmethod
def generate_chunk_data():
for i in range(10):
yield "Test chunk %s\r\n" % i
@data_driven_test(ObjectDatasetList())
def ddtest_object_retrieval_with_valid_object_name(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
response = self.client.get_object(container_name, object_name)
method = 'object creation with valid object name'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_retrieval_with_if_match(
self, object_type, generate_object):
"""
Bug filed for dlo/slo support of If-match Header:
https://bugs.launchpad.net/swift/+bug/1279076
"""
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
obj_info = generate_object(container_name, object_name)
headers = {'If-Match': obj_info.get('etag')}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if match header'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_retrieval_with_if_none_match(
self, object_type, generate_object):
"""
Bug filed for dlo/slo support of If-match Header:
https://bugs.launchpad.net/swift/+bug/1279076
"""
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_info = generate_object(container_name, object_name)
headers = {'If-None-Match': 'grok'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if none match header'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
headers = {'If-None-Match': object_info.get('etag')}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object should be flagged as not modified'
expected = 304
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_retrieval_with_if_modified_since(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'If-Modified-Since': 'Fri, 17 Aug 2001 18:44:42 GMT'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if modified since header (past date)'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_not_modified_with_if_modified_since(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'If-Modified-Since': 'Fri, 17 Aug 2101 18:44:42 GMT'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if modified since header (future date)'
expected = 304
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_retrieval_with_if_unmodified_since(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
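        # The object has not been modified after this future date, so the request
        # should succeed with 200.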
headers = {'If-Unmodified-Since': 'Fri, 17 Aug 2101 18:44:42 GMT'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if unmodified since header'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_retrieval_fails_with_if_unmodified_since(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
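        # The object was modified after this past date, so the precondition fails with 412.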
headers = {'If-Unmodified-Since': 'Fri, 17 Aug 2001 18:44:42 GMT'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = ('object retrieval precondition fail with if unmodified'
' since header')
expected = 412
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_partial_object_retrieval_with_start_range(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'Range': 'bytes=5-'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'partial object retrieval with start range'
expected = 206
received = response.status_code
self.assertEqual(
expected,
received,
            msg=STATUS_CODE_MSG.format(
                method=method,
                expected=expected,
                received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_partial_object_retrieval_with_end_range(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'Range': 'bytes=-4'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'partial object retrieval with end range'
expected = 206
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_partial_object_retrieval_with_range(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'Range': 'bytes=5-8'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'partial object retrieval with start and end range'
expected = 206
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_partial_object_retrieval_with_complete_range(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
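        # 'bytes=99-0' is a syntactically invalid range (start > end); the Range header
        # is ignored and the complete object is returned with 200.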
headers = {'Range': 'bytes=99-0'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'partial object retrieval with complete range'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_valid_object_name(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_info = generate_object(container_name, object_name)
response = object_info.get('response')
method = 'object creation with valid object name'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
container_name,
self.default_obj_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response_md5 = md5(response.content).hexdigest()
self.assertEqual(
object_info.get('md5'),
response_md5,
msg='should return identical object')
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_update_with_valid_object_name(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
updated_object_data = 'Updated test file data'
updated_content_length = str(len(updated_object_data))
headers = {'Content-Length': updated_content_length,
'Content-Type': CONTENT_TYPES.get('text')}
response = self.client.create_object(
container_name,
self.default_obj_name,
headers=headers,
data=updated_object_data)
method = 'object update with valid object name'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_etag(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_info = generate_object(container_name, object_name)
response = object_info.get('response')
method = 'object creation with etag header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
response = self.client.get_object(
container_name,
self.default_obj_name)
self.assertIn(
'etag',
response.headers,
msg="Etag header was set")
if object_type == 'standard':
expected = object_info.get('etag')
else:
expected = '"{0}"'.format(object_info.get('etag'))
received = response.headers.get('etag')
self.assertEqual(
expected,
received,
msg='object created with Etag header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def test_object_creation_with_uppercase_etag(self):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_data = "valid_data"
data_md5 = md5(object_data).hexdigest()
upper_etag = data_md5.upper()
headers = {"ETag": upper_etag}
create_response = self.client.create_object(container_name,
object_name,
data=object_data,
headers=headers)
method = 'object creation with uppercase etag header'
expected = 201
received = create_response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
object_response = self.client.get_object(
container_name,
self.default_obj_name)
self.assertIn(
'etag',
object_response.headers,
msg="Etag header was set")
expected = data_md5
received = object_response.headers.get('etag')
self.assertEqual(
expected,
received,
msg='object created with Etag header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_allow_credentials(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Allow-Credentials': 'true'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Allow-Credentials header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Allow-Credentials',
response.headers,
msg="Access-Control-Allow-Credentials header was set")
expected = 'true'
received = response.headers.get('Access-Control-Allow-Credentials')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Allow-Credentials header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_allow_methods(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {
'Access-Control-Allow-Methods': 'GET, POST, OPTIONS'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Allow-Methods header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Allow-Methods',
response.headers,
msg="Access-Control-Allow-Methods header was set")
expected = 'GET, POST, OPTIONS'
received = response.headers.get('Access-Control-Allow-Methods')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Allow-Methods header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_allow_origin(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {
'Access-Control-Allow-Origin': 'http://example.com'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Allow-Origin header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name, self.default_obj_name)
self.assertIn(
'Access-Control-Allow-Origin',
response.headers,
msg="Access-Control-Allow-Origin header was set")
expected = 'http://example.com'
received = response.headers.get('Access-Control-Allow-Origin')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Allow-Origin header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_expose_headers(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Expose-Headers': 'X-Foo-Header'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Expose-Headers header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Expose-Headers',
response.headers,
msg="Access-Control-Expose-Headers header was set")
expected = 'X-Foo-Header'
received = response.headers.get('Access-Control-Expose-Headers')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Expose-Headers header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
    def ddtest_object_creation_with_access_control_max_age(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Max-Age': '5'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Max-Age header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Max-Age',
response.headers,
msg="Access-Control-Max-Age header was set")
expected = '5'
received = response.headers.get('Access-Control-Max-Age')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Max-Age header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_request_headers(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Request-Headers': 'x-requested-with'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Request-Headers header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Request-Headers',
response.headers,
msg="Access-Control-Request-Headers header was set")
expected = 'x-requested-with'
received = response.headers.get('Access-Control-Request-Headers')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Request-Headers header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_request_method(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Request-Method': 'GET'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Request-Method header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Request-Method',
response.headers,
msg="Access-Control-Request-Method header was set")
expected = 'GET'
received = response.headers.get('Access-Control-Request-Method')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Request-Method header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_retrieval_with_origin(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
headers = {'access-control-allow-origin': 'http://example.com',
'access-control-expose-headers': 'X-Trans-Id'}
generate_object(container_name, object_name, headers=headers)
headers = {'Origin': 'http://example.com'}
response = self.client.get_object_metadata(
container_name, object_name, headers=headers)
self.assertIn(
'access-control-expose-headers',
response.headers,
msg="access-control-expose-headers header should be set")
self.assertIn(
'access-control-allow-origin',
response.headers,
msg="access-control-allow-origin header should be set")
expected = 'http://example.com'
received = response.headers.get('access-control-allow-origin')
self.assertEqual(
expected,
received,
msg='access-control-allow-origin header should reflect origin'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_creation_with_file_compression(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
def object_data_op(data, extra_data):
data = zlib.compress(data)
return (data, extra_data)
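        # The payload is compressed client-side before upload; Swift stores the bytes
        # unchanged and records the Content-Encoding value as object metadata.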
object_headers = {'Content-Encoding': 'gzip'}
object_info = generate_object(container_name, object_name,
data_op=object_data_op,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Content-Encoding header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Content-Encoding',
response.headers,
msg="Content-Encoding header was set")
expected = 'gzip'
received = response.headers.get('Content-Encoding')
self.assertEqual(
expected,
received,
msg='object created with Content-Encoding header value'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_content_disposition(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {
'Content-Disposition': 'attachment; filename=testdata.txt'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with content disposition header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Content-Disposition',
response.headers,
msg="Content-Disposition header was set")
expected = 'attachment; filename=testdata.txt'
received = response.headers.get('Content-Disposition')
self.assertEqual(
expected,
received,
msg='object created with Content-Disposition header value'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_x_delete_at(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
start_time = calendar.timegm(time.gmtime())
future_time = str(int(start_time + 60))
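        # X-Delete-At takes an absolute Unix timestamp; schedule deletion 60 seconds from now.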
object_headers = {'X-Delete-At': future_time}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with X-Delete-At header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Delete-At',
response.headers,
msg="X-Delete-At header was set")
expected = future_time
received = response.headers.get('X-Delete-At')
self.assertEqual(
expected,
received,
msg='object created with X-Delete-At header value'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_delete_after(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'X-Delete-After': '60'}
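        # Swift converts X-Delete-After into an absolute X-Delete-At timestamp, which is
        # what the metadata check below verifies.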
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with X-Delete-After header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Delete-At',
response.headers,
msg="X-Delete-At header was set")
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object_versioning')
def ddtest_versioned_container_creation_with_valid_data(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_history_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
headers = {'X-Versions-Location': object_history_container_name}
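        # X-Versions-Location points the container at a second container that will hold
        # prior versions of overwritten objects.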
self.client.set_container_metadata(container_name, headers=headers)
# list objects in non-current container
response = self.client.list_objects(
object_history_container_name)
method = 'list on empty versioned container'
expected = 204
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
# Create an object (version 1)
object_name = self.default_obj_name
ver1_info = generate_object(container_name, object_name)
response = ver1_info.get('response')
method = 'object version one creation'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
# Update an object (version 2)
object_name = self.default_obj_name
ver2_info = generate_object(container_name, object_name)
response = ver2_info.get('response')
method = 'update version one object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.list_objects(object_history_container_name)
method = 'list on versioned container'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
    @unittest.skip("Problem with this test's assertion, needs review")
@data_driven_test(ObjectDatasetList())
def ddtest_put_copy_object(self, object_type, generate_object):
src_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
dest_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
src_object_name = '{0}_source'.format(self.default_obj_name)
generate_object(src_container_name, src_object_name)
dest_obj_name = '{0}_destination'.format(self.default_obj_name)
source = '/{0}/{1}'.format(src_container_name, src_object_name)
hdrs = {'X-Copy-From': source, 'Content-Length': '0'}
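        # Server-side copy via PUT: X-Copy-From names the source as /container/object
        # and the request body is empty.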
response = self.client.copy_object(
dest_container_name,
dest_obj_name,
headers=hdrs)
method = 'put copy object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
dest_container_name,
dest_obj_name)
method = 'copied object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_copy_object(self, object_type, generate_object):
src_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
dest_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
src_object_name = '{0}_source'.format(self.default_obj_name)
generate_object(src_container_name, src_object_name)
dest_object_name = '{0}_destination'.format(self.default_obj_name)
dest = '/{0}/{1}'.format(dest_container_name, dest_object_name)
headers = {'Destination': dest}
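        # Server-side copy via COPY: the Destination header names the target as /container/object.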
response = self.client.copy_object(
src_container_name,
src_object_name,
headers=headers)
method = 'copy object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
dest_container_name,
dest_object_name)
method = 'copied object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_deletion_with_valid_object(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
response = self.client.delete_object(
container_name,
object_name)
method = 'delete object'
expected = 204
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
container_name,
self.default_obj_name)
method = 'object retrieval'
expected = 404
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_obj_metadata_update_with_object_possessing_metadata(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name,
headers={'X-Object-Meta-Grok': 'Drok'})
response = self.client.get_object_metadata(
container_name, object_name)
self.assertIn(
'X-Object-Meta-Grok',
response.headers,
msg="object not created with X-Object-Meta-Grok header")
expected = 'Drok'
received = response.headers.get('X-Object-Meta-Grok')
self.assertEqual(
expected,
received,
msg='object created with X-Object-Meta-Grok header value'
' expected: {0} received: {1}'.format(expected, received))
headers = {'X-Object-Meta-Foo': 'Bar'}
response = self.client.set_object_metadata(
container_name,
self.default_obj_name,
headers=headers)
method = 'set object metadata'
expected = 202
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Object-Meta-Foo',
response.headers,
msg="object updated with X-Object-Meta-Foo header")
expected = 'Bar'
received = response.headers.get('X-Object-Meta-Foo')
self.assertEqual(
expected,
received,
msg='object X-Object-Meta-Foo header value expected: {0}'
' received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_obj_metadata_update(self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'X-Object-Meta-Grok': 'Drok'}
response = self.client.set_object_metadata(
container_name, object_name, headers=headers)
method = 'set object metadata X-Object-Meta-Grok: Drok'
expected = 202
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Object-Meta-Grok',
response.headers,
msg="object updated with X-Object-Meta-Grok header")
expected = 'Drok'
received = response.headers.get('X-Object-Meta-Grok')
self.assertEqual(
expected,
received,
msg='object X-Object-Meta-Grok header value expected: {0}'
' received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_content_type_not_detected_without_detect_content_type_header(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object1_name = 'object1.txt'
object1_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
generate_object(container_name, object1_name, headers=object1_headers)
object2_name = 'object2.txt'
object2_headers = {'X-Detect-Content-Type': False,
'Content-Type': 'application/x-www-form-urlencoded'}
generate_object(container_name, object2_name, headers=object2_headers)
response = self.client.get_object(
container_name, object1_name)
expected = 'application/x-www-form-urlencoded'
received = response.headers.get('content-type')
self.assertEqual(
expected,
received,
msg='object created should have content type: {0}'
' received: {1}'.format(expected, received))
response = self.client.get_object(
            container_name, object2_name)
        received = response.headers.get('content-type')
self.assertEqual(
expected,
received,
msg='object created should have content type: {0}'
' received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_content_type_detected_with_detect_content_type(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object1_name = 'object1.txt'
object1_headers = {'X-Detect-Content-Type': True,
'Content-Type': 'application/x-www-form-urlencoded'}
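        # With X-Detect-Content-Type enabled, the supplied Content-Type is ignored and
        # text/plain is derived from the .txt extension.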
generate_object(container_name, object1_name, headers=object1_headers)
response = self.client.get_object(
container_name, object1_name)
expected = 'text/plain'
received = response.headers.get('content-type')
self.assertEqual(
expected,
received,
msg='object created should have content type: {0}'
' received: {1}'.format(expected, received))
object2_name = 'object2.txt'
object2_headers = {'X-Detect-Content-Type': True}
generate_object(container_name, object2_name, headers=object2_headers)
response = self.client.get_object(
container_name, object2_name)
expected = 'text/plain'
received = response.headers.get('content-type')
self.assertEqual(
expected,
received,
msg='object created should have content type: {0}'
' received: {1}'.format(expected, received))
def test_object_creation_via_chunked_transfer(self):
"""
Scenario:
Create an object using chunked transfer encoding.
        Expected Results:
            A 201 status code is returned and a single object
            is created.
"""
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
headers = {"Transfer-Encoding": "chunked"}
create_response = self.client.create_object(
container_name,
self.default_obj_name,
headers=headers,
data=self.generate_chunk_data())
method = 'Object creation via chunked transfer'
expected = 201
received = create_response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
object_response = self.client.get_object(container_name,
self.default_obj_name)
method = 'Object retrieval'
expected = 200
received = object_response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
| [((533, 5, 533, 58), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(533, 44, 533, 57): '"""object-cors"""'}, {}), "('object-cors')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((577, 5, 577, 58), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(577, 44, 577, 57): '"""object-cors"""'}, {}), "('object-cors')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((622, 5, 622, 58), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(622, 44, 622, 57): '"""object-cors"""'}, {}), "('object-cors')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((666, 5, 666, 58), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(666, 44, 666, 57): '"""object-cors"""'}, {}), "('object-cors')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((710, 5, 710, 58), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(710, 44, 710, 57): '"""object-cors"""'}, {}), "('object-cors')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((754, 5, 754, 58), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(754, 44, 754, 57): '"""object-cors"""'}, {}), "('object-cors')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((798, 5, 798, 58), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(798, 44, 798, 57): '"""object-cors"""'}, {}), "('object-cors')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((842, 5, 842, 58), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(842, 44, 842, 57): '"""object-cors"""'}, {}), "('object-cors')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((1041, 5, 1041, 64), 'cloudroast.objectstorage.fixtures.ObjectStorageFixture.required_features', 'ObjectStorageFixture.required_features', ({(1041, 44, 1041, 63): '"""object_versioning"""'}, {}), "('object_versioning')", False, 'from cloudroast.objectstorage.fixtures import ObjectStorageFixture\n'), ((1116, 5, 1116, 69), 'unittest.skip', 'unittest.skip', ({(1116, 19, 1116, 68): '"""Problem with this tests assertion, needs review"""'}, {}), "('Problem with this tests assertion, needs review')", False, 'import unittest\n'), ((47, 22, 47, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((69, 22, 69, 63), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', (), '', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((100, 22, 100, 63), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', (), '', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((150, 22, 150, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from 
cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((177, 22, 177, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((204, 22, 204, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((231, 22, 231, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((259, 22, 259, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((284, 22, 284, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((311, 22, 311, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((338, 22, 338, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((365, 22, 365, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((408, 22, 408, 63), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', (), '', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((438, 22, 438, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((485, 22, 485, 63), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', (), '', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((532, 22, 532, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((576, 22, 576, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((621, 22, 621, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((665, 22, 665, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((709, 22, 709, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((753, 22, 753, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from 
cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((797, 22, 797, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((841, 22, 841, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((875, 22, 875, 63), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', (), '', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((922, 22, 922, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((964, 22, 964, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1008, 22, 1008, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1040, 22, 1040, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1117, 22, 1117, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1164, 22, 1164, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1211, 22, 1211, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1251, 22, 1251, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1313, 22, 1313, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1354, 22, 1354, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((1389, 22, 1389, 41), 'cloudroast.objectstorage.generators.ObjectDatasetList', 'ObjectDatasetList', ({}, {}), '()', False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((419, 35, 419, 60), 'cloudroast.objectstorage.generators.CONTENT_TYPES.get', 'CONTENT_TYPES.get', ({(419, 53, 419, 59): '"""text"""'}, {}), "('text')", False, 'from cloudroast.objectstorage.generators import ObjectDatasetList, CONTENT_TYPES\n'), ((883, 19, 883, 38), 'zlib.compress', 'zlib.compress', ({(883, 33, 883, 37): 'data'}, {}), '(data)', False, 'import zlib\n'), ((971, 37, 971, 50), 'time.gmtime', 'time.gmtime', ({}, {}), '()', False, 'import time\n'), ((402, 23, 402, 44), 'hashlib.md5', 'md5', ({(402, 27, 402, 43): 
'response.content'}, {}), '(response.content)', False, 'from hashlib import md5\n'), ((492, 19, 492, 35), 'hashlib.md5', 'md5', ({(492, 23, 492, 34): 'object_data'}, {}), '(object_data)', False, 'from hashlib import md5\n')] |
remicalixte/integrations-core | ceph/tests/conftest.py | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import pytest
from datadog_checks.dev import docker_run
from datadog_checks.dev.conditions import CheckDockerLogs
from datadog_checks.dev.subprocess import run_command
from .common import BASIC_CONFIG, HERE
E2E_METADATA = {
'start_commands': [
'apt-get update',
'apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y docker.io',
],
'docker_volumes': ['/var/run/docker.sock:/var/run/docker.sock'],
}
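# The E2E container installs the Docker CLI and mounts the host's Docker socket so
# that commands can be run against the ceph container through Docker.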
@pytest.fixture(scope="session")
def dd_environment():
compose_file = os.path.join(HERE, 'compose', 'docker-compose.yaml')
# We need a custom condition to wait a bit longer
with docker_run(
compose_file=compose_file,
conditions=[
CheckDockerLogs(compose_file, 'spawning ceph --cluster ceph -w', wait=5),
CheckDockerLogs(compose_file, 'Running on http://0.0.0.0:5000/'),
],
):
# Clean the disk space warning
run_command(
['docker', 'exec', 'dd-test-ceph', 'ceph', 'tell', 'mon.*', 'injectargs', '--mon_data_avail_warn', '5']
)
# Wait a bit for the change to take effect
condition = CheckDockerLogs(compose_file, 'Cluster is now healthy')
condition()
yield BASIC_CONFIG, E2E_METADATA
| [((24, 1, 24, 32), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((26, 19, 26, 71), 'os.path.join', 'os.path.join', ({(26, 32, 26, 36): 'HERE', (26, 38, 26, 47): '"""compose"""', (26, 49, 26, 70): '"""docker-compose.yaml"""'}, {}), "(HERE, 'compose', 'docker-compose.yaml')", False, 'import os\n'), ((36, 8, 38, 9), 'datadog_checks.dev.subprocess.run_command', 'run_command', ({(37, 12, 37, 115): "['docker', 'exec', 'dd-test-ceph', 'ceph', 'tell', 'mon.*', 'injectargs',\n '--mon_data_avail_warn', '5']"}, {}), "(['docker', 'exec', 'dd-test-ceph', 'ceph', 'tell', 'mon.*',\n 'injectargs', '--mon_data_avail_warn', '5'])", False, 'from datadog_checks.dev.subprocess import run_command\n'), ((40, 20, 40, 75), 'datadog_checks.dev.conditions.CheckDockerLogs', 'CheckDockerLogs', ({(40, 36, 40, 48): 'compose_file', (40, 50, 40, 74): '"""Cluster is now healthy"""'}, {}), "(compose_file, 'Cluster is now healthy')", False, 'from datadog_checks.dev.conditions import CheckDockerLogs\n'), ((31, 12, 31, 84), 'datadog_checks.dev.conditions.CheckDockerLogs', 'CheckDockerLogs', (), '', False, 'from datadog_checks.dev.conditions import CheckDockerLogs\n'), ((32, 12, 32, 76), 'datadog_checks.dev.conditions.CheckDockerLogs', 'CheckDockerLogs', ({(32, 28, 32, 40): 'compose_file', (32, 42, 32, 75): '"""Running on http://0.0.0.0:5000/"""'}, {}), "(compose_file, 'Running on http://0.0.0.0:5000/')", False, 'from datadog_checks.dev.conditions import CheckDockerLogs\n')] |
weex/federation | federation/hostmeta/fetchers.py | 01357aacb04b076442ce5f803a0fc65df5a74d09 | import json
from typing import Dict, Optional
import requests
from federation.hostmeta.parsers import (
parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document,
parse_matrix_document, parse_misskey_document)
from federation.utils.network import fetch_document
HIGHEST_SUPPORTED_NODEINFO_VERSION = 2.1
def fetch_mastodon_document(host):
doc, status_code, error = fetch_document(host=host, path='/api/v1/instance')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_mastodon_document(doc, host)
def fetch_matrix_document(host: str) -> Optional[Dict]:
doc, status_code, error = fetch_document(host=host, path='/_matrix/federation/v1/version')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_matrix_document(doc, host)
def fetch_misskey_document(host: str, mastodon_document: Dict=None) -> Optional[Dict]:
try:
response = requests.post(f'https://{host}/api/meta') # ¯\_(ツ)_/¯
except Exception:
return
try:
doc = response.json()
except json.JSONDecodeError:
return
if response.status_code == 200:
return parse_misskey_document(doc, host, mastodon_document=mastodon_document)
def fetch_nodeinfo_document(host):
doc, status_code, error = fetch_document(host=host, path='/.well-known/nodeinfo')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
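    # The well-known document advertises one link per supported NodeInfo schema; pick
    # the highest version we support (the rel URL ends with the version number).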
url, highest_version = '', 0.0
if doc.get('0'):
# Buggy NodeInfo from certain old Hubzilla versions
url = doc.get('0', {}).get('href')
elif isinstance(doc.get('links'), dict):
# Another buggy NodeInfo from certain old Hubzilla versions
url = doc.get('links').get('href')
else:
for link in doc.get('links'):
version = float(link.get('rel').split('/')[-1])
if highest_version < version <= HIGHEST_SUPPORTED_NODEINFO_VERSION:
url, highest_version = link.get('href'), version
if not url:
return
doc, status_code, error = fetch_document(url=url)
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_nodeinfo_document(doc, host)
def fetch_nodeinfo2_document(host):
doc, status_code, error = fetch_document(host=host, path='/.well-known/x-nodeinfo2')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_nodeinfo2_document(doc, host)
def fetch_statisticsjson_document(host):
doc, status_code, error = fetch_document(host=host, path='/statistics.json')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_statisticsjson_document(doc, host)
| [((15, 30, 15, 80), 'federation.utils.network.fetch_document', 'fetch_document', (), '', False, 'from federation.utils.network import fetch_document\n'), ((22, 11, 22, 45), 'federation.hostmeta.parsers.parse_mastodon_document', 'parse_mastodon_document', ({(22, 35, 22, 38): 'doc', (22, 40, 22, 44): 'host'}, {}), '(doc, host)', False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((26, 30, 26, 94), 'federation.utils.network.fetch_document', 'fetch_document', (), '', False, 'from federation.utils.network import fetch_document\n'), ((33, 11, 33, 43), 'federation.hostmeta.parsers.parse_matrix_document', 'parse_matrix_document', ({(33, 33, 33, 36): 'doc', (33, 38, 33, 42): 'host'}, {}), '(doc, host)', False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((50, 30, 50, 85), 'federation.utils.network.fetch_document', 'fetch_document', (), '', False, 'from federation.utils.network import fetch_document\n'), ((75, 30, 75, 53), 'federation.utils.network.fetch_document', 'fetch_document', (), '', False, 'from federation.utils.network import fetch_document\n'), ((82, 11, 82, 45), 'federation.hostmeta.parsers.parse_nodeinfo_document', 'parse_nodeinfo_document', ({(82, 35, 82, 38): 'doc', (82, 40, 82, 44): 'host'}, {}), '(doc, host)', False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((86, 30, 86, 88), 'federation.utils.network.fetch_document', 'fetch_document', (), '', False, 'from federation.utils.network import fetch_document\n'), ((93, 11, 93, 46), 'federation.hostmeta.parsers.parse_nodeinfo2_document', 'parse_nodeinfo2_document', ({(93, 36, 93, 39): 'doc', (93, 41, 93, 45): 'host'}, {}), '(doc, host)', False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((97, 30, 97, 80), 'federation.utils.network.fetch_document', 'fetch_document', (), '', False, 'from federation.utils.network import fetch_document\n'), ((104, 11, 104, 51), 'federation.hostmeta.parsers.parse_statisticsjson_document', 'parse_statisticsjson_document', ({(104, 41, 104, 44): 'doc', (104, 46, 104, 50): 'host'}, {}), '(doc, host)', False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((19, 14, 19, 29), 'json.loads', 'json.loads', ({(19, 25, 19, 28): 'doc'}, {}), '(doc)', False, 'import json\n'), ((30, 14, 30, 29), 'json.loads', 'json.loads', ({(30, 25, 30, 28): 'doc'}, {}), '(doc)', False, 'import json\n'), ((38, 19, 38, 60), 'requests.post', 'requests.post', ({(38, 33, 38, 59): 'f"""https://{host}/api/meta"""'}, {}), "(f'https://{host}/api/meta')", False, 'import requests\n'), ((46, 15, 46, 85), 'federation.hostmeta.parsers.parse_misskey_document', 'parse_misskey_document', (), '', False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, 
parse_misskey_document\n'), ((54, 14, 54, 29), 'json.loads', 'json.loads', ({(54, 25, 54, 28): 'doc'}, {}), '(doc)', False, 'import json\n'), ((79, 14, 79, 29), 'json.loads', 'json.loads', ({(79, 25, 79, 28): 'doc'}, {}), '(doc)', False, 'import json\n'), ((90, 14, 90, 29), 'json.loads', 'json.loads', ({(90, 25, 90, 28): 'doc'}, {}), '(doc)', False, 'import json\n'), ((101, 14, 101, 29), 'json.loads', 'json.loads', ({(101, 25, 101, 28): 'doc'}, {}), '(doc)', False, 'import json\n')] |
iag0g0mes/t2_fis_driving_style | features/analysis_features.py | 7f62ac3e67e65e7bd1273a2f845eb05820e95b70 | import numpy as np
from typing import Any, Dict, List, Tuple, NoReturn
import argparse
import os
def parse_arguments() -> Any:
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
default="",
type=str,
help="Directory where the features (npy files) are saved",
)
parser.add_argument("--mode",
required=True,
type=str,
help="train/val/test/sample",
choices=['train', 'test', 'val','sample'])
parser.add_argument("--obs_len",
default=2,
type=int,
help="Observed length of the trajectory in seconds",
choices=[1,2,3,4,5])
parser.add_argument("--filter",
default='ekf',
type=str,
help="Filter to process the data noise. (ekf/none/ekf-savgol/savgol",
choices=['ekf', 'none', 'ekf-savgol', 'savgol'])
return parser.parse_args()
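# Example invocation (paths are illustrative):
#   python features/analysis_features.py --data_dir ./features --mode train --obs_len 2 --filter ekf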
def stats(traj:np.ndarray) -> NoReturn:
#central tendency : mean
#dispersion : std
#bounds : min max
#quantile : 0.25, 0.5, 0.75
labels = ['mean_v', 'mean_acc', 'mean_deac', 'std_jy']
for i, l in zip(range(0, traj.shape[1]), labels):
t = traj[:, i]
_mean = round(np.mean(t),2)
_std = round(np.std(t),2)
_min = round(np.min(t),2)
_max = round(np.max(t),2)
_q25 = round(np.quantile(t, 0.25),2)
_q50 = round(np.quantile(t, 0.5),2)
_q75 = round(np.quantile(t, 0.75),2)
print (f'Feature: {l}')
print ('\tmean:{} | std:{} | min:{} | max:{} | q25:{} | q50:{} | q75:{}'.format(_mean,
_std, _min, _max, _q25, _q50, _q75))
if __name__== '__main__':
#_filters = ['none', 'ekf', 'savgol', 'ekf-savgol']
#_modes = ['train', 'val', 'test', 'sample']
#_obs_len = [2,5]
#seg = _obs_len[0]
#mode = _modes[3]
#filter_name = _filters[0]
args = parse_arguments()
if args.mode == 'test':
args.obs_len = 2
assert os.path.exists(args.data_dir),\
f'[Analysis][main][ERROR] data_dir not found!({args.data_dir})'
data_file = 'features_{}_{}s_{}.npy'.format(args.mode,
args.obs_len,
args.filter)
assert os.path.exists(os.path.join(args.data_dir, data_file)),\
f'[Analysis][main][ERROR] data_file not found!({data_file})'
print ('[Analysis] loading dataset....')
# (m, 4)
# [mean_v, mean_acc, mean_deac, std_jy]
data = np.load(os.path.join(args.data_dir,data_file))
print ('[Analysis] mode:{} | filter:{} | obs_len:{}'.format(args.mode,
args.filter,
args.obs_len))
print ('[Analysis] data shape:{}'.format(data.shape))
print ('[Analysis] stats:')
stats(data)
| [((11, 10, 11, 35), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((80, 8, 80, 37), 'os.path.exists', 'os.path.exists', ({(80, 23, 80, 36): 'args.data_dir'}, {}), '(args.data_dir)', False, 'import os\n'), ((87, 23, 87, 61), 'os.path.join', 'os.path.join', ({(87, 36, 87, 49): 'args.data_dir', (87, 51, 87, 60): 'data_file'}, {}), '(args.data_dir, data_file)', False, 'import os\n'), ((93, 16, 93, 53), 'os.path.join', 'os.path.join', ({(93, 29, 93, 42): 'args.data_dir', (93, 43, 93, 52): 'data_file'}, {}), '(args.data_dir, data_file)', False, 'import os\n'), ((51, 16, 51, 26), 'numpy.mean', 'np.mean', ({(51, 24, 51, 25): 't'}, {}), '(t)', True, 'import numpy as np\n'), ((52, 16, 52, 25), 'numpy.std', 'np.std', ({(52, 23, 52, 24): 't'}, {}), '(t)', True, 'import numpy as np\n'), ((53, 16, 53, 25), 'numpy.min', 'np.min', ({(53, 23, 53, 24): 't'}, {}), '(t)', True, 'import numpy as np\n'), ((54, 16, 54, 25), 'numpy.max', 'np.max', ({(54, 23, 54, 24): 't'}, {}), '(t)', True, 'import numpy as np\n'), ((55, 16, 55, 36), 'numpy.quantile', 'np.quantile', ({(55, 28, 55, 29): 't', (55, 31, 55, 35): '0.25'}, {}), '(t, 0.25)', True, 'import numpy as np\n'), ((56, 16, 56, 35), 'numpy.quantile', 'np.quantile', ({(56, 28, 56, 29): 't', (56, 31, 56, 34): '0.5'}, {}), '(t, 0.5)', True, 'import numpy as np\n'), ((57, 16, 57, 36), 'numpy.quantile', 'np.quantile', ({(57, 28, 57, 29): 't', (57, 31, 57, 35): '0.75'}, {}), '(t, 0.75)', True, 'import numpy as np\n')] |
scottwedge/OpenStack-Stein | python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py | 7077d1f602031dace92916f14e36b124f474de15 | # -*- encoding: utf-8 -*-
# Copyright (c) 2017 Servionica
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import freezegun
import mock
import oslo_messaging as om
from watcher.common import rpc
from watcher import notifications
from watcher.objects import service as w_service
from watcher.tests.db import base
from watcher.tests.objects import utils
@freezegun.freeze_time('2016-10-18T09:52:05.219414')
class TestActionPlanNotification(base.DbTestCase):
def setUp(self):
super(TestActionPlanNotification, self).setUp()
p_get_notifier = mock.patch.object(rpc, 'get_notifier')
m_get_notifier = p_get_notifier.start()
self.addCleanup(p_get_notifier.stop)
self.m_notifier = mock.Mock(spec=om.Notifier)
def fake_get_notifier(publisher_id):
self.m_notifier.publisher_id = publisher_id
return self.m_notifier
m_get_notifier.side_effect = fake_get_notifier
def test_service_failed(self):
service = utils.get_test_service(mock.Mock(),
created_at=datetime.datetime.utcnow())
state = w_service.ServiceStatus.FAILED
notifications.service.send_service_update(mock.MagicMock(),
service,
state,
host='node0')
notification = self.m_notifier.warning.call_args[1]
payload = notification['payload']
self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id)
self.assertDictEqual({
'watcher_object.data': {
'last_seen_up': '2016-09-22T08:32:06Z',
'name': 'watcher-service',
'sevice_host': 'controller',
'status_update': {
'watcher_object.data': {
'old_state': 'ACTIVE',
'state': 'FAILED'
},
'watcher_object.name': 'ServiceStatusUpdatePayload',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.0'
}
},
'watcher_object.name': 'ServiceUpdatePayload',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.0'
},
payload
)
| [((30, 1, 30, 52), 'freezegun.freeze_time', 'freezegun.freeze_time', ({(30, 23, 30, 51): '"""2016-10-18T09:52:05.219414"""'}, {}), "('2016-10-18T09:52:05.219414')", False, 'import freezegun\n'), ((35, 25, 35, 63), 'mock.patch.object', 'mock.patch.object', ({(35, 43, 35, 46): 'rpc', (35, 48, 35, 62): '"""get_notifier"""'}, {}), "(rpc, 'get_notifier')", False, 'import mock\n'), ((38, 26, 38, 53), 'mock.Mock', 'mock.Mock', (), '', False, 'import mock\n'), ((47, 41, 47, 52), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((50, 50, 50, 66), 'mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'import mock\n'), ((48, 52, 48, 78), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n')] |
rainzhop/cumulus-tank | leetcode/medium/best-time-to-buy-and-sell-stock-ii.py | 09ebc7858ea53630e30606945adfea856a80faa3 | # https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii/
#
# Say you have an array for which the ith element is the price of a given stock on day i.
#
# Design an algorithm to find the maximum profit.
# You may complete as many transactions as you like (ie, buy one and sell one share of the stock multiple times).
# However, you may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
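# Example: prices = [7, 1, 5, 3, 6, 4] -> buy at 1, sell at 5 (+4); buy at 3, sell at 6 (+3); max profit = 7.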
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if prices == []:
return 0
profit_list = []
min_val = prices[0]
max_val = prices[0]
tend = 0 # 0:down, 1:up
for i in range(1, len(prices)):
if prices[i] < prices[i - 1]:
# go down
if tend == 1:
max_val = prices[i - 1]
profit_list.append(max_val - min_val)
tend = 0
pass
if prices[i] > prices[i - 1]:
# go up
if tend == 0:
min_val = prices[i - 1]
tend = 1
pass
        # after the loop: if the series ends on an uptrend, bank the final profit
        if tend == 1:
            profit_list.append(prices[-1] - min_val)
return sum(profit_list)
if __name__ == '__main__':
prices = [8,9,2,5]
s = Solution()
    print(s.maxProfit(prices))
| [] |
yashikajotwani12/django-loci | django_loci/tests/base/test_admin.py | 2c0bcb33f4a56d559f798e37fd17b2143b912ce4 | import json
import os
import responses
from django.urls import reverse
from .. import TestAdminMixin, TestLociMixin
class BaseTestAdmin(TestAdminMixin, TestLociMixin):
geocode_url = 'https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/'
def test_location_list(self):
self._login_as_admin()
self._create_location(name='test-admin-location-1')
url = reverse('{0}_location_changelist'.format(self.url_prefix))
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_floorplan_list(self):
self._login_as_admin()
self._create_floorplan()
self._create_location()
url = reverse('{0}_floorplan_changelist'.format(self.url_prefix))
r = self.client.get(url)
self.assertContains(r, '1st floor')
def test_location_json_view(self):
self._login_as_admin()
loc = self._create_location()
r = self.client.get(reverse('admin:django_loci_location_json', args=[loc.pk]))
expected = {
'name': loc.name,
'address': loc.address,
'type': loc.type,
'is_mobile': loc.is_mobile,
'geometry': json.loads(loc.geometry.json),
}
self.assertDictEqual(r.json(), expected)
def test_location_floorplan_json_view(self):
self._login_as_admin()
fl = self._create_floorplan()
r = self.client.get(
reverse('admin:django_loci_location_floorplans_json', args=[fl.location.pk])
)
expected = {
'choices': [
{
'id': str(fl.pk),
'str': str(fl),
'floor': fl.floor,
'image': fl.image.url,
'image_width': fl.image.width,
'image_height': fl.image.height,
}
]
}
self.assertDictEqual(r.json(), expected)
def test_location_change_image_removed(self):
self._login_as_admin()
loc = self._create_location(name='test-admin-location-1', type='indoor')
fl = self._create_floorplan(location=loc)
# remove floorplan image
os.remove(fl.image.path)
url = reverse('{0}_location_change'.format(self.url_prefix), args=[loc.pk])
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_floorplan_change_image_removed(self):
self._login_as_admin()
loc = self._create_location(name='test-admin-location-1', type='indoor')
fl = self._create_floorplan(location=loc)
# remove floorplan image
os.remove(fl.image.path)
url = reverse('{0}_floorplan_change'.format(self.url_prefix), args=[fl.pk])
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_is_mobile_location_json_view(self):
self._login_as_admin()
loc = self._create_location(is_mobile=True, geometry=None)
response = self.client.get(
reverse('admin:django_loci_location_json', args=[loc.pk])
)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(content['geometry'], None)
loc1 = self._create_location(
name='location2', address='loc2 add', type='outdoor'
)
response1 = self.client.get(
reverse('admin:django_loci_location_json', args=[loc1.pk])
)
self.assertEqual(response1.status_code, 200)
content1 = json.loads(response1.content)
expected = {
'name': 'location2',
'address': 'loc2 add',
'type': 'outdoor',
'is_mobile': False,
'geometry': {'type': 'Point', 'coordinates': [12.512124, 41.898903]},
}
self.assertEqual(content1, expected)
@responses.activate
def test_geocode(self):
self._login_as_admin()
address = 'Red Square'
url = '{0}?address={1}'.format(
reverse('admin:django_loci_location_geocode_api'), address
)
# Mock HTTP request to the URL to work offline
responses.add(
responses.GET,
f'{self.geocode_url}findAddressCandidates?singleLine=Red+Square&f=json&maxLocations=1',
body=self._load_content('base/static/test-geocode.json'),
content_type='application/json',
)
response = self.client.get(url)
response_lat = round(response.json()['lat'])
response_lng = round(response.json()['lng'])
self.assertEqual(response.status_code, 200)
self.assertEqual(response_lat, 56)
self.assertEqual(response_lng, 38)
def test_geocode_no_address(self):
self._login_as_admin()
url = reverse('admin:django_loci_location_geocode_api')
response = self.client.get(url)
expected = {'error': 'Address parameter not defined'}
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), expected)
@responses.activate
def test_geocode_invalid_address(self):
self._login_as_admin()
invalid_address = 'thisaddressisnotvalid123abc'
url = '{0}?address={1}'.format(
reverse('admin:django_loci_location_geocode_api'), invalid_address
)
responses.add(
responses.GET,
f'{self.geocode_url}findAddressCandidates?singleLine=thisaddressisnotvalid123abc'
'&f=json&maxLocations=1',
body=self._load_content('base/static/test-geocode-invalid-address.json'),
content_type='application/json',
)
response = self.client.get(url)
expected = {'error': 'Not found location with given name'}
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json(), expected)
@responses.activate
def test_reverse_geocode(self):
self._login_as_admin()
lat = 52
lng = 21
url = '{0}?lat={1}&lng={2}'.format(
reverse('admin:django_loci_location_reverse_geocode_api'), lat, lng
)
# Mock HTTP request to the URL to work offline
responses.add(
responses.GET,
f'{self.geocode_url}reverseGeocode?location=21.0%2C52.0&f=json&outSR=4326',
body=self._load_content('base/static/test-reverse-geocode.json'),
content_type='application/json',
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'POL')
@responses.activate
def test_reverse_location_with_no_address(self):
self._login_as_admin()
lat = -30
lng = -30
url = '{0}?lat={1}&lng={2}'.format(
reverse('admin:django_loci_location_reverse_geocode_api'), lat, lng
)
responses.add(
responses.GET,
f'{self.geocode_url}reverseGeocode?location=-30.0%2C-30.0&f=json&outSR=4326',
body=self._load_content(
'base/static/test-reverse-location-with-no-address.json'
),
content_type='application/json',
)
response = self.client.get(url)
response_address = response.json()['address']
self.assertEqual(response.status_code, 404)
self.assertEqual(response_address, '')
def test_reverse_geocode_no_coords(self):
self._login_as_admin()
url = reverse('admin:django_loci_location_reverse_geocode_api')
response = self.client.get(url)
expected = {'error': 'lat or lng parameter not defined'}
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), expected)
| [((66, 8, 66, 32), 'os.remove', 'os.remove', ({(66, 18, 66, 31): 'fl.image.path'}, {}), '(fl.image.path)', False, 'import os\n'), ((76, 8, 76, 32), 'os.remove', 'os.remove', ({(76, 18, 76, 31): 'fl.image.path'}, {}), '(fl.image.path)', False, 'import os\n'), ((88, 18, 88, 46), 'json.loads', 'json.loads', ({(88, 29, 88, 45): 'response.content'}, {}), '(response.content)', False, 'import json\n'), ((97, 19, 97, 48), 'json.loads', 'json.loads', ({(97, 30, 97, 47): 'response1.content'}, {}), '(response1.content)', False, 'import json\n'), ((130, 14, 130, 63), 'django.urls.reverse', 'reverse', ({(130, 22, 130, 62): '"""admin:django_loci_location_geocode_api"""'}, {}), "('admin:django_loci_location_geocode_api')", False, 'from django.urls import reverse\n'), ((197, 14, 197, 71), 'django.urls.reverse', 'reverse', ({(197, 22, 197, 70): '"""admin:django_loci_location_reverse_geocode_api"""'}, {}), "('admin:django_loci_location_reverse_geocode_api')", False, 'from django.urls import reverse\n'), ((31, 28, 31, 85), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((37, 24, 37, 53), 'json.loads', 'json.loads', ({(37, 35, 37, 52): 'loc.geometry.json'}, {}), '(loc.geometry.json)', False, 'import json\n'), ((45, 12, 45, 88), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((85, 12, 85, 69), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((94, 12, 94, 70), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((112, 12, 112, 61), 'django.urls.reverse', 'reverse', ({(112, 20, 112, 60): '"""admin:django_loci_location_geocode_api"""'}, {}), "('admin:django_loci_location_geocode_api')", False, 'from django.urls import reverse\n'), ((141, 12, 141, 61), 'django.urls.reverse', 'reverse', ({(141, 20, 141, 60): '"""admin:django_loci_location_geocode_api"""'}, {}), "('admin:django_loci_location_geocode_api')", False, 'from django.urls import reverse\n'), ((161, 12, 161, 69), 'django.urls.reverse', 'reverse', ({(161, 20, 161, 68): '"""admin:django_loci_location_reverse_geocode_api"""'}, {}), "('admin:django_loci_location_reverse_geocode_api')", False, 'from django.urls import reverse\n'), ((180, 12, 180, 69), 'django.urls.reverse', 'reverse', ({(180, 20, 180, 68): '"""admin:django_loci_location_reverse_geocode_api"""'}, {}), "('admin:django_loci_location_reverse_geocode_api')", False, 'from django.urls import reverse\n')] |
vatsalag99/Deformable-Channel-Attention | dca_models/deform_offsets_module.py | d904135fd7be45331a16d9cb84e44f8e1ff5c07e | import torch
from torch import nn
from torch.nn.parameter import Parameter
from einops import rearrange, reduce, repeat
class dca_offsets_layer(nn.Module):
"""Constructs a Offset Generation module.
"""
def __init__(self, channel, n_offsets):
super(dca_offsets_layer, self).__init__()
self.channel = channel
self.n_offsets = n_offsets
def covariance_features(self, x):
"""
Takes in a feature map and returns the unnormalized covariance matrix
"""
m_batchsize, C, height, width = x.size()
        # standardize across channels before computing covariance (parentheses ensure the mean is subtracted before dividing by the std)
        x = (x - x.mean(dim=1, keepdim=True)) / (x.std(dim=1, keepdim=True) + 1e-5)
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
return energy
def forward(self, x):
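        # for every channel, find its n_offsets most correlated channels and express them as offsets relative to that channel's own index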
m_batchsize, C, height, width = x.size()
cov_matrix = self.covariance_features(x).reshape(m_batchsize, C, 1, C)
_, locations = torch.topk(cov_matrix, self.n_offsets, dim=1)
delta = torch.stack(self.n_offsets*[torch.arange(0, self.channel)], dim=0)
delta = torch.stack(m_batchsize * [delta], dim=0)
offsets = locations.squeeze() - delta.cuda()
return offsets
| [((24, 17, 24, 48), 'torch.bmm', 'torch.bmm', ({(24, 27, 24, 37): 'proj_query', (24, 39, 24, 47): 'proj_key'}, {}), '(proj_query, proj_key)', False, 'import torch\n'), ((31, 23, 31, 68), 'torch.topk', 'torch.topk', (), '', False, 'import torch\n'), ((33, 16, 33, 57), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((32, 44, 32, 73), 'torch.arange', 'torch.arange', ({(32, 57, 32, 58): '0', (32, 60, 32, 72): 'self.channel'}, {}), '(0, self.channel)', False, 'import torch\n')] |
egor43/PyImageComparsion | tests/__init__.py | 5270f5646c40391cc5ac225305d7be9b0b7de140 | from . import test_helpers
from . import test_image_opener
from . import test_image_metrick
from . import test_compare_tools
from . import test_compare_api | [] |
donnellan0007/blog | core/urls.py | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | from django.contrib import admin
from django.urls import path
from .views import index, email, post_detail, posts, hot_takes, take_detail
from . import views
app_name = "core"
urlpatterns = [
path('',views.index,name="index"),
path('email/',views.email,name="email"),
path('post/<slug>/',views.post_detail,name='post'),
path('posts/',views.posts,name='posts'),
path('takes/',views.hot_takes,name='takes'),
path('take/<slug>/',views.take_detail,name='take'),
] | [((9, 4, 9, 37), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((10, 4, 10, 43), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((11, 4, 11, 54), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((12, 4, 12, 43), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((13, 4, 13, 47), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((14, 4, 14, 54), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n')] |
pgolding/pandas-grid | griddy/__init__.py | 0f80db1511097656496dee503d7bb281b97b8bdc | from .grid import render_table | [] |
iamvishnuks/Xmigrate | utils/dbconn.py | f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7 | from mongoengine import *
from dotenv import load_dotenv
from os import getenv
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table
from cassandra.query import ordered_dict_factory
from model.discover import *
from model.blueprint import *
from model.disk import *
from model.storage import *
from model.project import *
from model.network import *
from model.user import *
load_dotenv()
cass_db = getenv("CASS_DB")
cass_password = getenv("CASS_PASSWORD")
cass_user = getenv("CASS_USER")
def create_db_con():
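    # connect to Cassandra, ensure the 'migration' keyspace, model tables and secondary indexes exist, then return the session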
auth_provider = PlainTextAuthProvider(username=cass_user, password=cass_password)
cluster = Cluster([cass_db],auth_provider=auth_provider)
session = cluster.connect()
session.execute("""
CREATE KEYSPACE IF NOT EXISTS migration
WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
""")
session.set_keyspace('migration')
session.row_factory = ordered_dict_factory
connection.setup([cass_db], "migration",protocol_version=3,auth_provider=auth_provider)
sync_table(BluePrint)
sync_table(Discover)
sync_table(Project)
sync_table(Network)
sync_table(Subnet)
sync_table(Storage)
sync_table(Bucket)
sync_table(GcpBucket)
sync_table(User)
sync_table(Disk)
session.execute("CREATE INDEX IF NOT EXISTS ON blue_print (network);")
session.execute("CREATE INDEX IF NOT EXISTS ON blue_print (subnet);")
return session
| [((17, 0, 17, 13), 'dotenv.load_dotenv', 'load_dotenv', ({}, {}), '()', False, 'from dotenv import load_dotenv\n'), ((19, 10, 19, 27), 'os.getenv', 'getenv', ({(19, 17, 19, 26): '"""CASS_DB"""'}, {}), "('CASS_DB')", False, 'from os import getenv\n'), ((20, 16, 20, 39), 'os.getenv', 'getenv', ({(20, 23, 20, 38): '"""CASS_PASSWORD"""'}, {}), "('CASS_PASSWORD')", False, 'from os import getenv\n'), ((21, 12, 21, 31), 'os.getenv', 'getenv', ({(21, 19, 21, 30): '"""CASS_USER"""'}, {}), "('CASS_USER')", False, 'from os import getenv\n'), ((24, 20, 24, 85), 'cassandra.auth.PlainTextAuthProvider', 'PlainTextAuthProvider', (), '', False, 'from cassandra.auth import PlainTextAuthProvider\n'), ((25, 14, 25, 60), 'cassandra.cluster.Cluster', 'Cluster', (), '', False, 'from cassandra.cluster import Cluster\n'), ((33, 4, 33, 91), 'cassandra.cqlengine.connection.setup', 'connection.setup', (), '', False, 'from cassandra.cqlengine import connection\n'), ((34, 4, 34, 25), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(34, 15, 34, 24): 'BluePrint'}, {}), '(BluePrint)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((35, 4, 35, 24), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(35, 15, 35, 23): 'Discover'}, {}), '(Discover)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((36, 4, 36, 23), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(36, 15, 36, 22): 'Project'}, {}), '(Project)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((37, 4, 37, 23), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(37, 15, 37, 22): 'Network'}, {}), '(Network)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((38, 4, 38, 22), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(38, 15, 38, 21): 'Subnet'}, {}), '(Subnet)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((39, 4, 39, 23), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(39, 15, 39, 22): 'Storage'}, {}), '(Storage)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((40, 4, 40, 22), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(40, 15, 40, 21): 'Bucket'}, {}), '(Bucket)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((41, 4, 41, 25), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(41, 15, 41, 24): 'GcpBucket'}, {}), '(GcpBucket)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((42, 4, 42, 20), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(42, 15, 42, 19): 'User'}, {}), '(User)', False, 'from cassandra.cqlengine.management import sync_table\n'), ((43, 4, 43, 20), 'cassandra.cqlengine.management.sync_table', 'sync_table', ({(43, 15, 43, 19): 'Disk'}, {}), '(Disk)', False, 'from cassandra.cqlengine.management import sync_table\n')] |
godormad/PySyft | src/syft/lib/__init__.py | fcb3374b6318dcccf377175fb8db6f70e9e1d1e3 | # stdlib
import importlib
import sys
from typing import Any
from typing import Any as TypeAny
from typing import Dict as TypeDict
from typing import Optional
# third party
from packaging import version
# syft relative
from ..ast.globals import Globals
from ..lib.python import create_python_ast
from ..lib.torch import create_torch_ast
from ..lib.torchvision import create_torchvision_ast
from ..logger import critical
from ..logger import traceback_and_raise
from .misc import create_union_ast
class VendorLibraryImportException(Exception):
pass
def vendor_requirements_available(vendor_requirements: TypeDict[str, TypeAny]) -> bool:
# see if python version is supported
if "python" in vendor_requirements:
python_reqs = vendor_requirements["python"]
PYTHON_VERSION = sys.version_info
min_version = python_reqs.get("min_version", None)
if min_version is not None:
if PYTHON_VERSION < min_version:
traceback_and_raise(
VendorLibraryImportException(
f"Unable to load {vendor_requirements['lib']}."
+ f"Python: {PYTHON_VERSION} < {min_version}"
)
)
# see if torch version is supported
if "torch" in vendor_requirements:
torch_reqs = vendor_requirements["torch"]
# third party
import torch
TORCH_VERSION = version.parse(torch.__version__.split("+")[0])
min_version = torch_reqs.get("min_version", None)
if min_version is not None:
if TORCH_VERSION < version.parse(min_version):
traceback_and_raise(
VendorLibraryImportException(
f"Unable to load {vendor_requirements['lib']}."
+ f"Torch: {TORCH_VERSION} < {min_version}"
)
)
return True
def load_lib(lib: str, options: TypeDict[str, TypeAny] = {}) -> None:
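    # import the library plus its syft.lib bindings; if version requirements pass, patch the global AST and every registered client, and cache the updater for clients created later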
try:
_ = importlib.import_module(lib)
vendor_ast = importlib.import_module(f"syft.lib.{lib}")
PACKAGE_SUPPORT = getattr(vendor_ast, "PACKAGE_SUPPORT", None)
PACKAGE_SUPPORT.update(options)
if PACKAGE_SUPPORT is not None and vendor_requirements_available(
vendor_requirements=PACKAGE_SUPPORT
):
update_ast = getattr(vendor_ast, "update_ast", None)
if update_ast is not None:
global lib_ast
update_ast(ast_or_client=lib_ast)
for _, client in lib_ast.registered_clients.items():
update_ast(ast_or_client=client)
# cache the constructor for future created clients
lib_ast.loaded_lib_constructors[lib] = update_ast
except VendorLibraryImportException as e:
critical(e)
except Exception as e:
critical(f"Unable to load package support for: {lib}. {e}")
# now we need to load the relevant frameworks onto the node
def create_lib_ast(client: Optional[Any] = None) -> Globals:
python_ast = create_python_ast(client=client)
torch_ast = create_torch_ast(client=client)
torchvision_ast = create_torchvision_ast(client=client)
# numpy_ast = create_numpy_ast()
lib_ast = Globals(client=client)
lib_ast.add_attr(attr_name="syft", attr=python_ast.attrs["syft"])
lib_ast.add_attr(attr_name="torch", attr=torch_ast.attrs["torch"])
lib_ast.add_attr(attr_name="torchvision", attr=torchvision_ast.attrs["torchvision"])
# let the misc creation be always the last, as it needs the full ast solved
# to properly generated unions
union_misc_ast = getattr(getattr(create_union_ast(lib_ast, client), "syft"), "lib")
misc_root = getattr(getattr(lib_ast, "syft"), "lib")
misc_root.add_attr(attr_name="misc", attr=union_misc_ast.attrs["misc"])
return lib_ast
lib_ast = create_lib_ast(None)
| [((64, 12, 64, 40), 'importlib.import_module', 'importlib.import_module', ({(64, 36, 64, 39): 'lib'}, {}), '(lib)', False, 'import importlib\n'), ((65, 21, 65, 63), 'importlib.import_module', 'importlib.import_module', ({(65, 45, 65, 62): 'f"""syft.lib.{lib}"""'}, {}), "(f'syft.lib.{lib}')", False, 'import importlib\n'), ((48, 38, 48, 66), 'torch.__version__.split', 'torch.__version__.split', ({(48, 62, 48, 65): '"""+"""'}, {}), "('+')", False, 'import torch\n'), ((51, 31, 51, 57), 'packaging.version.parse', 'version.parse', ({(51, 45, 51, 56): 'min_version'}, {}), '(min_version)', False, 'from packaging import version\n')] |
GavinHaLab/Griffin | scripts/griffin_GC_counts.py | 83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
import pandas as pd
import numpy as np
import time
import argparse
import sys
from multiprocessing import Pool
# In[ ]:
# ##arguments for testing
# bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam'
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_path = '../../downloads/genome/repeat_masker.mapable.k50.Umap.hg38.bedGraph'
# ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa'
# chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes'
# out_dir = './tmp/'
# map_q = 20
# size_range = [15,500]
# CPU = 4
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file', help='sample_bam_file', required=True)
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_regions', help='highly mapable regions to be used in GC correction, bedGraph or bed format', required=True)
parser.add_argument('--ref_seq',help='reference sequence (fasta format)',required=True)
parser.add_argument('--chrom_sizes',help='path to chromosome sizes for the reference seq',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--map_q',help='minimum mapping quality for reads to be considered',type=int,required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
parser.add_argument('--CPU',help='number of CPU for parallelizing', type=int, required=True)
args = parser.parse_args()
bam_file_path = args.bam_file
bam_file_name = args.bam_file_name
mapable_path=args.mapable_regions
ref_seq_path = args.ref_seq
chrom_sizes_path = args.chrom_sizes
out_dir = args.out_dir
map_q = args.map_q
size_range = args.size_range
CPU = args.CPU
# In[ ]:
print('arguments provided:')
print('\tbam_file_path = "'+bam_file_path+'"')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_regions = "'+mapable_path+'"')
print('\tref_seq_path = "'+ref_seq_path+'"')
print('\tchrom_sizes_path = "'+chrom_sizes_path+'"')
print('\tout_dir = "'+out_dir+'"')
print('\tmap_q = '+str(map_q))
print('\tsize_range = '+str(size_range))
print('\tCPU = '+str(CPU))
# In[ ]:
mapable_name = mapable_path.rsplit('/',1)[1].rsplit('.',1)[0]
out_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('out_file',out_file)
# In[ ]:
#create a directory for the GC data
if not os.path.exists(out_dir +'/'+mapable_name):
os.mkdir(out_dir +'/'+mapable_name)
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_counts/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_counts/')
# In[ ]:
#import filter
mapable_intervals = pd.read_csv(mapable_path, sep='\t', header=None)
#remove non standard chromosomes and X and Y
chroms = ['chr'+str(m) for m in range(1,23)]
mapable_intervals = mapable_intervals[mapable_intervals[0].isin(chroms)]
print('chroms:', chroms)
print('number_of_intervals:',len(mapable_intervals))
sys.stdout.flush()
# In[ ]:
def collect_reads(sublist):
#create a dict for holding the frequency of each read length and GC content
GC_dict = {}
for length in range(size_range[0],size_range[1]+1):
GC_dict[length]={}
for num_GC in range(0,length+1):
GC_dict[length][num_GC]=0
#import the bam file
#this needs to be done within the loop otherwise it gives a truncated file warning
bam_file = pysam.AlignmentFile(bam_file_path, "rb")
print('sublist intervals:',len(sublist))
#this might also need to be in the loop
#import the ref_seq
ref_seq=pysam.FastaFile(ref_seq_path)
for i in range(len(sublist)):
chrom = sublist.iloc[i][0]
start = sublist.iloc[i][1]
end = sublist.iloc[i][2]
if i%5000==0:
print('interval',i,':',chrom,start,end,'seconds:',np.round(time.time()-start_time))
sys.stdout.flush()
        #fetch any read that overlaps the interval (don't need to extend the interval because the fetch function does this automatically)
fetched = bam_file.fetch(chrom,start,end)
for read in fetched:
#use both fw (positive template length) and rv (negative template length) reads
if (read.is_reverse==False and read.template_length>=size_range[0] and read.template_length<=size_range[1]) or (read.is_reverse==True and -read.template_length>=size_range[0] and -read.template_length<=size_range[1]):
#qc filters, some longer fragments are considered 'improper pairs' but I would like to keep these
if read.is_paired==True and read.mapping_quality>=map_q and read.is_duplicate==False and read.is_qcfail==False:
if read.is_reverse==False:
read_start = read.reference_start
read_end = read.reference_start+read.template_length
elif read.is_reverse==True:
read_end = read.reference_start + read.reference_length
read_start = read_end + read.template_length
fragment_seq = ref_seq.fetch(read.reference_name,read_start,read_end)
#tally up the GC content
fragment_seq=fragment_seq.replace('g','G').replace('c','C').replace('a','A').replace('t','T').replace('n','N')
# #################
# ##logic check####
# #################
# if read.is_reverse==False:
# if fragment_seq[0:read.reference_length]==read.query_sequence and len(fragment_seq)==read.template_length:
# print('fw match',read.reference_length)
# else:
# print(fragment_seq[0:read.reference_length],read.reference_length,'fw')
# print(read.query_sequence,len(read.query_sequence),'fw')
# print(len(fragment_seq),read.template_length)
# print('\n')
# elif read.is_reverse==True:
# if fragment_seq[-read.reference_length:]==read.query_sequence and len(fragment_seq)==-read.template_length:
# print('rv match',read.reference_length)
# else:
# print(fragment_seq[-read.reference_length:],read.reference_length,'rv')
# print(read.query_sequence,len(read.query_sequence),'rv')
# print(len(fragment_seq),read.template_length)
# print('\n')
# #################
#split and convert to numpy array
fragment_seq = np.array(list(fragment_seq))
#replace with values
fragment_seq[(fragment_seq=='G') | (fragment_seq=='C')]=1
fragment_seq[(fragment_seq=='A') | (fragment_seq=='T')]=0
fragment_seq[(fragment_seq=='N')]=np.random.randint(2) #choose a random 0 or 1 for N (so that you always get an integer) #should be very rare if the filter is done right
fragment_seq = fragment_seq.astype(int)
num_GC = int(fragment_seq.sum())
GC_dict[abs(read.template_length)][num_GC]+=1
print('done')
return(GC_dict)
# In[ ]:
start_time = time.time()
p = Pool(processes=CPU) #use the available CPU
sublists = np.array_split(mapable_intervals,CPU) #split the list into sublists, one per CPU
GC_dict_list = p.map(collect_reads, sublists, 1)
# In[ ]:
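#combine the per-process GC dictionaries into one table indexed by (length, num_GC) and sum the counts across processes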
all_GC_df = pd.DataFrame()
for i,GC_dict in enumerate(GC_dict_list):
GC_df = pd.DataFrame()
for length in GC_dict.keys():
current = pd.Series(GC_dict[length]).reset_index()
current = current.rename(columns={'index':'num_GC',0:'number_of_fragments'})
current['length']=length
current = current[['length','num_GC','number_of_fragments']]
GC_df = GC_df.append(current, ignore_index=True)
GC_df = GC_df.set_index(['length','num_GC'])
all_GC_df[i] = GC_df['number_of_fragments']
del(GC_df,GC_dict)
all_GC_df = all_GC_df.sum(axis=1)
all_GC_df = pd.DataFrame(all_GC_df).rename(columns = {0:'number_of_fragments'})
all_GC_df = all_GC_df.reset_index()
all_GC_df.to_csv(out_file,sep='\t',index=False)
# In[ ]:
print('done')
# In[ ]:
# In[ ]:
# In[ ]:
| [((42, 9, 42, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((114, 20, 114, 68), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((123, 0, 123, 18), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((210, 13, 210, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((211, 4, 211, 23), 'multiprocessing.Pool', 'Pool', (), '', False, 'from multiprocessing import Pool\n'), ((212, 11, 212, 48), 'numpy.array_split', 'np.array_split', ({(212, 26, 212, 43): 'mapable_intervals', (212, 44, 212, 47): 'CPU'}, {}), '(mapable_intervals, CPU)', True, 'import numpy as np\n'), ((220, 12, 220, 26), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((104, 7, 104, 48), 'os.path.exists', 'os.path.exists', ({(104, 22, 104, 47): "(out_dir + '/' + mapable_name)"}, {}), "(out_dir + '/' + mapable_name)", False, 'import os\n'), ((105, 4, 105, 39), 'os.mkdir', 'os.mkdir', ({(105, 13, 105, 38): "(out_dir + '/' + mapable_name)"}, {}), "(out_dir + '/' + mapable_name)", False, 'import os\n'), ((106, 7, 106, 62), 'os.path.exists', 'os.path.exists', ({(106, 22, 106, 61): "(out_dir + '/' + mapable_name + '/GC_counts/')"}, {}), "(out_dir + '/' + mapable_name + '/GC_counts/')", False, 'import os\n'), ((107, 4, 107, 53), 'os.mkdir', 'os.mkdir', ({(107, 13, 107, 52): "(out_dir + '/' + mapable_name + '/GC_counts/')"}, {}), "(out_dir + '/' + mapable_name + '/GC_counts/')", False, 'import os\n'), ((139, 15, 139, 55), 'pysam.AlignmentFile', 'pysam.AlignmentFile', ({(139, 35, 139, 48): 'bam_file_path', (139, 50, 139, 54): '"""rb"""'}, {}), "(bam_file_path, 'rb')", False, 'import pysam\n'), ((144, 12, 144, 41), 'pysam.FastaFile', 'pysam.FastaFile', ({(144, 28, 144, 40): 'ref_seq_path'}, {}), '(ref_seq_path)', False, 'import pysam\n'), ((222, 12, 222, 26), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((234, 12, 234, 35), 'pandas.DataFrame', 'pd.DataFrame', ({(234, 25, 234, 34): 'all_GC_df'}, {}), '(all_GC_df)', True, 'import pandas as pd\n'), ((152, 12, 152, 30), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((224, 18, 224, 44), 'pandas.Series', 'pd.Series', ({(224, 28, 224, 43): 'GC_dict[length]'}, {}), '(GC_dict[length])', True, 'import pandas as pd\n'), ((197, 54, 197, 74), 'numpy.random.randint', 'np.random.randint', ({(197, 72, 197, 73): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((151, 71, 151, 82), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
canovasjm/InterviewProject_JuanCanovas | task2/04-task2-upload-dim-tables.py | 6ff385c66664328cea0678454560e89e44851e24 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 18:17:07 2021
@author: jm
"""
# %% required libraries
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
# %% connect to DB
# create connection using pymssql
engine = create_engine('mssql+pymssql://sa:<YourStrong@Passw0rd>@localhost:1433/rga')
connection = engine.connect()
# %% read data sets from where I will build the dimension tables
# read employee roster data
employee_roster = pd.read_excel("datasources/Employee_Roster_Data.xlsx", sheet_name = 'Sheet1')
# read skills data
skills = pd.read_excel("datasources/skills.xlsx", sheet_name = "Sheet1")
# read hours data
hours = pd.read_excel("datasources/hours.xlsx", sheet_name = "Sheet1")
# %% dimensions created from source employee_roster
# %% create DIM_Currency
# get unique values
currencies = sorted(employee_roster['Currency'].unique())
# create a data frame
DIM_Currency = pd.DataFrame({'id_currency': (np.arange(len(currencies)) + 1), 'currency': currencies})
# send data frame to DB
DIM_Currency.to_sql('DIM_Currency', con = connection, if_exists = 'append', index = False)
# %% create DIM_Department
# get unique values
departments = sorted(pd.concat([employee_roster['Department'], skills['Department']], axis = 0).unique())
# create a data frame
DIM_Department = pd.DataFrame({'id_department': (np.arange(len(departments)) + 1), 'department': departments})
# send data frame to DB
DIM_Department.to_sql('DIM_Department', con = connection, if_exists = 'append', index = False)
# %% create DIM_Gender
# get unique values
genders = sorted(pd.concat([employee_roster['Gender'], skills['Gender']], axis = 0).unique())
# create a data frame
DIM_Gender = pd.DataFrame({'id_gender': (np.arange(len(genders)) + 1), 'gender': genders})
# send data frame to DB
DIM_Gender.to_sql('DIM_Gender', con = connection, if_exists = 'append', index = False)
# %% create DIM_User
# check if 'UserId' values in 'skills' are in 'User_ID' in 'employee_roster'
# we get 20134 'True' values, meaning that all 'UserId' in 'skills' are already
# in 'User_ID' in employee_roster
users_check_1 = np.isin(skills['UserId'], employee_roster['User_ID']).sum()
# check if 'UserId' values in 'hours' are in 'User_ID' in 'employee_roster'
# we get 7659 'True' values, meaning that NOT all 'UserId' in 'hours' are already
# in 'User_ID' in employee_roster
users_check_2 = np.isin(hours['UserId'], employee_roster['User_ID']).sum()
# get unique values
users = sorted(pd.concat([employee_roster['User_ID'], skills['UserId'], hours['UserId']], axis = 0).unique())
# create a data frame to use pd.merge()
df_users = pd.DataFrame({'User_ID': users})
# left join 'df_user' with 'employee_roster' on 'UserID'
users_final = pd.merge(df_users, employee_roster, on = 'User_ID', how ='left')
# select only columns I need
users_final = users_final[['User_ID', 'Email_ID', 'Fullname']]
# rename columns
users_final.rename(columns = {'User_ID': 'id_user', 'Email_ID': 'id_email', 'Fullname': 'fullname'}, inplace = True)
# send data frame to DB
users_final.to_sql('DIM_User', con = connection, if_exists = 'append', index = False)
# %% dimensions created from source skills
# %% create DIM_AttributeGroup
# get unique values
att_group = sorted(skills['Attribute Group'].unique())
# create a data frame
DIM_AttributeGroup = pd.DataFrame({'id_att_group': (np.arange(len(att_group)) + 1), 'attribute_group': att_group})
# send data frame to DB
DIM_AttributeGroup.to_sql('DIM_AttributeGroup', con = connection, if_exists = 'append', index = False)
# %% create DIM_AttributeSubGroup
# get unique values
att_sub_group = sorted(skills['Attribute Sub-Group'].unique())
# create a data frame
DIM_AttributeSubGroup = pd.DataFrame({'id_att_sub_group': (np.arange(len(att_sub_group)) + 1), 'attribute_sub_group': att_sub_group})
# send data frame to DB
DIM_AttributeSubGroup.to_sql('DIM_AttributeSubGroup', con = connection, if_exists = 'append', index = False)
# %% create DIM_AttributeName
# get unique values
att_name = sorted(skills['Attribute Name'].unique())
# create a data frame
DIM_AttributeName = pd.DataFrame({'id_att_name': (np.arange(len(att_name)) + 1), 'attribute_name': att_name})
# send data frame to DB
DIM_AttributeName.to_sql('DIM_AttributeName', con = connection, if_exists = 'append', index = False)
| [((17, 9, 17, 85), 'sqlalchemy.create_engine', 'create_engine', ({(17, 23, 17, 84): '"""mssql+pymssql://sa:<YourStrong@Passw0rd>@localhost:1433/rga"""'}, {}), "('mssql+pymssql://sa:<YourStrong@Passw0rd>@localhost:1433/rga')", False, 'from sqlalchemy import create_engine\n'), ((23, 18, 23, 95), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((26, 9, 26, 72), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((29, 8, 29, 70), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((79, 11, 79, 43), 'pandas.DataFrame', 'pd.DataFrame', ({(79, 24, 79, 42): "{'User_ID': users}"}, {}), "({'User_ID': users})", True, 'import pandas as pd\n'), ((82, 14, 82, 78), 'pandas.merge', 'pd.merge', (), '', True, 'import pandas as pd\n'), ((68, 16, 68, 69), 'numpy.isin', 'np.isin', ({(68, 24, 68, 40): "skills['UserId']", (68, 42, 68, 68): "employee_roster['User_ID']"}, {}), "(skills['UserId'], employee_roster['User_ID'])", True, 'import numpy as np\n'), ((73, 16, 73, 68), 'numpy.isin', 'np.isin', ({(73, 24, 73, 39): "hours['UserId']", (73, 41, 73, 67): "employee_roster['User_ID']"}, {}), "(hours['UserId'], employee_roster['User_ID'])", True, 'import numpy as np\n'), ((46, 21, 46, 95), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((56, 17, 56, 83), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((76, 15, 76, 99), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n')] |
vikasbaghel1001/Kanna-Chan | cogs/server.py | 6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb | import discord
from discord.ext import commands
arrow = "<a:right:877425183839891496>"
kwee = "<:kannawee:877036162122924072>"
kdance = "<a:kanna_dance:877038778798207016>"
kbored = "<:kanna_bored:877036162827583538>"
ksmug = "<:kanna_smug:877038777896427560>"
heart = "<a:explosion_heart:877426228775227392>"
class Server(commands.Cog):
def __init__(self, client):
self.client = client
self.kana_id = 857835279259664403
@commands.command()
@commands.is_owner()
async def sabout(self, ctx):
kana = self.client.get_user(self.kana_id)
about_file = discord.File("./images/about_server.png")
await ctx.send(file = about_file)
emb = discord.Embed(title=f"{kdance} ABOUT SERVER {kdance}",description = f"{arrow} **DRAGON LOLI'S HOME** is the official Server of the bot **Kanna Chan**. It's a friendly community meant for having fun, chilling and spending time with others.\n{arrow} This server has cute emotes and a lot of fun events are about to be done here! So, stay tuned!", color=0xfc74c6)
emb.add_field(
name=f"{kwee} __ROLES__",
value=f"{arrow} <@&876800883441156138> The highest role supposed to be only for Kanna Chan.\n{arrow} <@&876817811396263946> Admins of the Server and have the highest power and authority after owner.\n{arrow} <@&876818242058997791> Moderators of the server meant to moderate the chat and maintain a positive environment in community.\n{arrow} <@&876801038420701196> Developer(s) of Kanna Chan have this role.\n{arrow} <@&876804164661944340> All other users who join this server get this role by default. They have image and embed perms by deault.\n{arrow} **PS: APART FROM THESE SELF-ROLES ARE ALSO AVAIALBLE FOR MEMBERS.**",
inline=False
)
emb.add_field(
name=f"{ksmug} __CHANNELS__",
value=f"{arrow} <#877030933847490691> Read the rules here.\n{arrow} <#877031867440832574> Channel for grabbing self-roles.\n{arrow} <#876798564704084011> The general chat for the server.\n{arrow} <#876798809819189249> Bot Commands should be executed here.\n{arrow} <#876798696078065694> You can give suggestions for improving Kanna Chan here.\n{arrow} <#876798720254029864> You can report BUGS here if you find any in Kanna Chan.\n{arrow} <#876798750876651530> For any other support or query use this channel.\n{arrow} **P.S: YOU CAN PING ANY STAFF MEMBER OR DEVELOPER WHILE REPORTING BUG OR IN CASE OF ANY QUERY.**",
inline=False
)
emb.set_footer(
text="Kanna Chan",
icon_url=kana.avatar_url
)
await ctx.send(embed=emb)
@commands.command()
@commands.is_owner()
async def rule(self, ctx):
kana = self.client.get_user(self.kana_id)
rule_file = discord.File("./images/rules.png")
await ctx.send(file=rule_file)
emb = discord.Embed(title=f"{kbored} RULES {kbored}", color=0xfc74c6)
emb.add_field(
name=f"{heart} **Be respectful**",
value=f"You must respect all users, regardless of your liking towards them. Treat others the way you want to be treated.",
inline=False
)
emb.add_field(
name=f"{heart} **No Inappropriate Language**",
value=f"{arrow} The use of profanity should be kept to a minimum. However, any derogatory language towards any user is prohibited.",
inline=False
)
emb.add_field(
name=f"{heart} **No spamming**",
value=f"{arrow} Don't send a lot of small messages right after each other. Do not disrupt chat by spamming.",
inline=False
)
emb.add_field(
name=f"{heart} **No pornographic/adult/other NSFW material**",
value=f"{arrow} This is a community server and not meant to share this kind of material.",
inline=False
)
emb.add_field(
name=f"{heart} **No advertisements**",
value=f"{arrow} We do not tolerate any kind of advertisements, whether it be for other communities or streams. You can post your content in the media channel if it is relevant and provides actual value (Video/Art)",
inline=False
)
emb.add_field(
name=f"{heart} **No offensive names and profile pictures**",
value=f"{arrow} You will be asked to change your name or picture if the staff deems them inappropriate.",
inline=False
)
emb.add_field(
name=f"{heart} **Server Raiding**",
value=f"{arrow} Raiding or mentions of raiding are not allowed.",
inline=False
)
emb.add_field(
name=f"{heart} **Direct & Indirect Threats**",
value=f"{arrow} Threats to other users of DDoS, Death, DoX, abuse, and other malicious threats are absolutely prohibited and disallowed.",
inline=False
)
emb.add_field(
name=f"{heart} **Follow the Discord Community Guidelines**",
value=f"{arrow} You can find them here: https://discordapp.com/guidelines",
inline=False
)
emb.add_field(
name=f"{heart} **VOICE CHANNELS**",
value=f"{arrow} Do not join voice chat channels without permission of the people already in there.",
inline=False
)
emb.add_field(
name=f"{heart} **DECISIONS AND ISSUES**",
value = f"{arrow} ***The Admins and Mods will Mute/Kick/Ban per discretion. If you feel mistreated DM an Admin and we will resolve the issue.***",
inline=False
)
emb.add_field(
name=f"{heart} **CHANGES**",
value = f"{arrow} ***Your presence in this server implies accepting these rules, including all further changes. These changes might be done at any time without notice, it is your responsibility to check for them.***",
inline=False
)
emb.set_footer(
text="Kanna Chan",
icon_url=kana.avatar_url
)
await ctx.send(embed=emb)
@commands.Cog.listener()
async def on_member_join(self, member):
if member.guild.id == 876798564704084008:
if member.bot:
return
else:
member_role = member.guild.get_role(876804164661944340)
await member.add_roles(member_role)
desc = f"{member.name} Thanks for joining Kanna's Server. The server is currently under construction, Thanks for being an **early supporter**!! If you need any kind of help or support just ping any staff member or DM `aSHish#1198`. Have a nice stay in the server :)"
await member.send(desc)
else:
return
def setup(client):
client.add_cog(Server(client))
print(">> Server Utility loaded") | [((16, 5, 16, 23), 'discord.ext.commands.command', 'commands.command', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((17, 5, 17, 24), 'discord.ext.commands.is_owner', 'commands.is_owner', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((39, 5, 39, 23), 'discord.ext.commands.command', 'commands.command', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((40, 5, 40, 24), 'discord.ext.commands.is_owner', 'commands.is_owner', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((112, 5, 112, 28), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((20, 21, 20, 62), 'discord.File', 'discord.File', ({(20, 34, 20, 61): '"""./images/about_server.png"""'}, {}), "('./images/about_server.png')", False, 'import discord\n'), ((22, 14, 22, 374), 'discord.Embed', 'discord.Embed', (), '', False, 'import discord\n'), ((43, 20, 43, 54), 'discord.File', 'discord.File', ({(43, 33, 43, 53): '"""./images/rules.png"""'}, {}), "('./images/rules.png')", False, 'import discord\n'), ((45, 14, 45, 77), 'discord.Embed', 'discord.Embed', (), '', False, 'import discord\n')] |
timmartin/skulpt | test/run/t344.py | 2e3a3fbbaccc12baa29094a717ceec491a8a6750 | for ch in "Hello world!":
d = ord(ch)
h = hex(d)
o = oct(d)
b = bin(d)
print ch, d, h, o, b
| [] |
RandomGamer342/TTM4115-plantsensor | paho/mqtt/subscribe.py | e63c34160d284bb6fd26563eeba949d54026348b | # Copyright (c) 2016 Roger Light <[email protected]>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
# http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# Roger Light - initial API and implementation
"""
This module provides some helper functions to allow straightforward subscribing
to topics and retrieving messages. The two functions are simple(), which
returns one or more messages matching a set of topics, and callback() which allows
you to pass a callback for processing of messages.
"""
import paho.mqtt.client as paho
import paho.mqtt as mqtt
import ssl
def _on_connect(c, userdata, flags, rc):
"""Internal callback"""
if rc != 0:
raise mqtt.MQTTException(paho.connack_string(rc))
if type(userdata['topics']) is list:
for t in userdata['topics']:
c.subscribe(t, userdata['qos'])
else:
c.subscribe(userdata['topics'], userdata['qos'])
def _on_message_callback(c, userdata, message):
"""Internal callback"""
userdata['callback'](c, userdata['userdata'], message)
def _on_message_simple(c, userdata, message):
"""Internal callback"""
if userdata['msg_count'] == 0:
return
# Don't process stale retained messages if 'retained' was false
if userdata['retained'] == False and message.retain == True:
return
userdata['msg_count'] = userdata['msg_count'] - 1
if userdata['messages'] is None and userdata['msg_count'] == 0:
userdata['messages'] = message
c.disconnect()
return
userdata['messages'].append(message)
if userdata['msg_count'] == 0:
c.disconnect()
def callback(callback, topics, qos=0, userdata=None, hostname="localhost",
port=1883, client_id="", keepalive=60, will=None, auth=None, tls=None,
protocol=paho.MQTTv311, transport="tcp"):
"""Subscribe to a list of topics and process them in a callback function.
This function creates an MQTT client, connects to a broker and subscribes
to a list of topics. Incoming messages are processed by the user provided
callback. This is a blocking function and will never return.
callback : function of the form "on_message(client, userdata, message)" for
processing the messages received.
topics : either a string containing a single topic to subscribe to, or a
list of topics to subscribe to.
qos : the qos to use when subscribing. This is applied to all topics.
userdata : passed to the callback
hostname : a string containing the address of the broker to connect to.
Defaults to localhost.
port : the port to connect to the broker on. Defaults to 1883.
client_id : the MQTT client id to use. If "" or None, the Paho library will
generate a client id automatically.
keepalive : the keepalive timeout value for the client. Defaults to 60
seconds.
will : a dict containing will parameters for the client: will = {'topic':
"<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}.
Topic is required, all other parameters are optional and will
default to None, 0 and False respectively.
Defaults to None, which indicates no will should be used.
auth : a dict containing authentication parameters for the client:
auth = {'username':"<username>", 'password':"<password>"}
Username is required, password is optional and will default to None
if not provided.
Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client:
dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
'ciphers':"<ciphers">}
ca_certs is required, all other parameters are optional and will
default to None if not provided, which results in the client using
the default behaviour - see the paho.mqtt.client documentation.
Defaults to None, which indicates that TLS should not be used.
transport : set to "tcp" to use the default setting of transport which is
raw TCP. Set to "websockets" to use WebSockets as the transport.
"""
if qos < 0 or qos > 2:
raise ValueError('qos must be in the range 0-2')
callback_userdata = {
'callback':callback,
'topics':topics,
'qos':qos,
'userdata':userdata}
client = paho.Client(client_id=client_id,
userdata=callback_userdata, protocol=protocol, transport=transport)
client.on_message = _on_message_callback
client.on_connect = _on_connect
if auth is not None:
username = auth['username']
try:
password = auth['password']
except KeyError:
password = None
client.username_pw_set(username, password)
if will is not None:
will_topic = will['topic']
try:
will_payload = will['payload']
except KeyError:
will_payload = None
try:
will_qos = will['qos']
except KeyError:
will_qos = 0
try:
will_retain = will['retain']
except KeyError:
will_retain = False
client.will_set(will_topic, will_payload, will_qos, will_retain)
if tls is not None:
ca_certs = tls['ca_certs']
try:
certfile = tls['certfile']
except KeyError:
certfile = None
try:
keyfile = tls['keyfile']
except KeyError:
keyfile = None
try:
tls_version = tls['tls_version']
except KeyError:
            tls_version = ssl.PROTOCOL_SSLv23
try:
ciphers = tls['ciphers']
except KeyError:
ciphers = None
client.tls_set(ca_certs, certfile, keyfile, tls_version=tls_version,
ciphers=ciphers)
client.connect(hostname, port, keepalive)
client.loop_forever()
def simple(topics, qos=0, msg_count=1, retained=True, hostname="localhost", port=1883,
client_id="", keepalive=60, will=None, auth=None, tls=None,
protocol=paho.MQTTv311, transport="tcp"):
"""Subscribe to a list of topics and return msg_count messages.
This function creates an MQTT client, connects to a broker and subscribes
to a list of topics. Once "msg_count" messages have been received, it
disconnects cleanly from the broker and returns the messages.
topics : either a string containing a single topic to subscribe to, or a
list of topics to subscribe to.
qos : the qos to use when subscribing. This is applied to all topics.
msg_count : the number of messages to retrieve from the broker.
if msg_count == 1 then a single MQTTMessage will be returned.
if msg_count > 1 then a list of MQTTMessages will be returned.
retained : If set to True, retained messages will be processed the same as
non-retained messages. If set to False, retained messages will
be ignored. This means that with retained=False and msg_count=1,
the function will return the first message received that does
not have the retained flag set.
hostname : a string containing the address of the broker to connect to.
Defaults to localhost.
port : the port to connect to the broker on. Defaults to 1883.
client_id : the MQTT client id to use. If "" or None, the Paho library will
generate a client id automatically.
keepalive : the keepalive timeout value for the client. Defaults to 60
seconds.
will : a dict containing will parameters for the client: will = {'topic':
"<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}.
Topic is required, all other parameters are optional and will
default to None, 0 and False respectively.
Defaults to None, which indicates no will should be used.
auth : a dict containing authentication parameters for the client:
auth = {'username':"<username>", 'password':"<password>"}
Username is required, password is optional and will default to None
if not provided.
Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client:
dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
'ciphers':"<ciphers">}
ca_certs is required, all other parameters are optional and will
default to None if not provided, which results in the client using
the default behaviour - see the paho.mqtt.client documentation.
Defaults to None, which indicates that TLS should not be used.
transport : set to "tcp" to use the default setting of transport which is
raw TCP. Set to "websockets" to use WebSockets as the transport.
"""
if msg_count < 1:
raise ValueError('msg_count must be > 0')
# Set ourselves up to return a single message if msg_count == 1, or a list
# if > 1.
if msg_count == 1:
messages = None
else:
messages = []
userdata = {'retained':retained, 'msg_count':msg_count, 'messages':messages}
callback(_on_message_simple, topics, qos, userdata, hostname, port,
client_id, keepalive, will, auth, tls, protocol, transport)
return userdata['messages']
| [((129, 13, 130, 92), 'paho.mqtt.client.Client', 'paho.Client', (), '', True, 'import paho.mqtt.client as paho\n'), ((30, 33, 30, 56), 'paho.mqtt.client.connack_string', 'paho.connack_string', ({(30, 53, 30, 55): 'rc'}, {}), '(rc)', True, 'import paho.mqtt.client as paho\n')] |
cs-cordero/advent-of-code | py/2017/day24/aoc_day_24.py | 614b8f78b43c54ef180a7dc411a0d1366a62944f | from collections import defaultdict
def solution():
starting_components = d[0]
best_scores = []
for component in starting_components:
n_a, n_b = get_ports(component)
nxt_port = n_a if n_b == 0 else n_b
best_scores.append(recurse(component, set(), nxt_port, 0))
print("fuck", max(best_scores))
def recurse(component, seen, next_port, level):
seen.add(component)
c_a, c_b = get_ports(component)
next_components = d[next_port] - seen
my_score = sum(get_ports(component))
scores = []
for next_component in next_components:
n_a, n_b = get_ports(next_component)
nxt_port = n_a if n_b in (c_a, c_b) else n_b
score, reclevel = recurse(next_component, seen.copy(), nxt_port, level + 1)
scores.append((score, reclevel))
scores = sorted(scores, key=lambda x: (x[1], x[0]), reverse=True)
print(component, level, scores)
return my_score + (scores[0][0] if scores else 0), scores[0][1] if scores else level
def get_ports(component):
return map(int, component.split("/"))
if __name__ == "__main__":
d = defaultdict(set)
# with open('aoc_day_24_sample.txt') as f:
with open("aoc_day_24_input.txt") as f:
sample = f.readlines()
# sample = [
# '0/1',
# '1/2',
# '1/3',
# '1/4',
# '5/0',
# '2/5',
# '3/6',
# '4/500'
# ]
for component in sample:
a, b = map(int, component.split("/"))
d[a].add(component)
d[b].add(component)
solution()
| [((37, 8, 37, 24), 'collections.defaultdict', 'defaultdict', ({(37, 20, 37, 23): 'set'}, {}), '(set)', False, 'from collections import defaultdict\n')] |
Gr1m3y/scratchnet | scratchnet/scratchnet.py | 5fce471b6e12dc05b3a92fd8581445f7d598d1c3 | import numpy as np
import network
def main():
x = np.array([2, 3])
nw = network.NeuralNetwork()
print(nw.feedforward(x))
if __name__ == "__main__":
main()
| [((6, 8, 6, 24), 'numpy.array', 'np.array', ({(6, 17, 6, 23): '[2, 3]'}, {}), '([2, 3])', True, 'import numpy as np\n'), ((7, 9, 7, 32), 'network.NeuralNetwork', 'network.NeuralNetwork', ({}, {}), '()', False, 'import network\n')] |
rmhsawyer/EC601-Final-Project-Mapping_User_Face_To_Emoji | live-plotting.py | 05a61dca25ef6dc6827e3389a753eb65a09c1813 | #draw the predictions from real-time.py
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
def animate(i):
graph_data = open('emotion.txt', 'r').read()
lines = graph_data.split('\n')
xs = []
y_angry = []
y_fear = []
y_happy = []
y_sad = []
y_surprise = []
y_neutral = []
for line in lines:
if len(line) > 1:
time, angry, fear, happy, sad, surprise, neutral = line.split(',')
xs.append(time)
y_angry.append(angry)
y_fear.append(fear)
y_happy.append(happy)
y_sad.append(sad)
y_surprise.append(surprise)
y_neutral.append(neutral)
ax1.clear()
ax1.plot(xs, y_angry)
ax1.plot(xs, y_fear)
ax1.plot(xs, y_happy)
ax1.plot(xs, y_sad)
ax1.plot(xs, y_surprise)
ax1.plot(xs, y_neutral)
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
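
# Note added for clarity (an assumption about the data file, not something the
# original script documents): animate() expects each line of emotion.txt to
# hold seven comma-separated fields in the order
#     time,angry,fear,happy,sad,surprise,neutral
# e.g. "12:01:05,0.02,0.01,0.85,0.03,0.05,0.04" (sample values are made up),
# matching the line.split(',') unpacking above.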
| [((6, 0, 6, 28), 'matplotlib.style.use', 'style.use', ({(6, 10, 6, 27): '"""fivethirtyeight"""'}, {}), "('fivethirtyeight')", False, 'from matplotlib import style\n'), ((8, 6, 8, 18), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((40, 6, 40, 58), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (), '', True, 'import matplotlib.animation as animation\n'), ((41, 0, 41, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n')] |
kirk86/ARS | code/run_policy.py | a4ac03e06bce5f183f7b18ea74b81c6c45c4426b | """
Code to load a policy and generate rollout data. Adapted from https://github.com/berkeleydeeprlcourse.
Example usage:
python run_policy.py ../trained_policies/Humanoid-v1/policy_reward_11600/lin_policy_plus.npz Humanoid-v1 --render \
--num_rollouts 20
"""
import numpy as np
import gym
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('expert_policy_file', type=str)
parser.add_argument('envname', type=str)
parser.add_argument('--render', action='store_true')
parser.add_argument('--num_rollouts', type=int, default=20,
help='Number of expert rollouts')
args = parser.parse_args()
print('loading and building expert policy')
lin_policy = np.load(args.expert_policy_file)
lin_policy = lin_policy[lin_policy.files[0]]
M = lin_policy[0]
# mean and std of state vectors estimated online by ARS.
mean = lin_policy[1]
std = lin_policy[2]
env = gym.make(args.envname)
returns = []
observations = []
actions = []
for i in range(args.num_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = np.dot(M, (obs - mean)/std)
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if args.render:
env.render()
if steps % 100 == 0: print("%i/%i"%(steps, env.spec.timestep_limit))
if steps >= env.spec.timestep_limit:
break
returns.append(totalr)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
if __name__ == '__main__':
main()
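
# Note added for clarity (inferred from the loading code above, so treat it as
# an assumption): lin_policy_plus.npz is expected to contain a single array
# whose first three entries are the linear policy matrix M and the state
# mean/std estimated online by ARS, roughly:
#     np.savez(path, np.array([M, mean, std], dtype=object))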
| [((14, 13, 14, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((23, 17, 23, 49), 'numpy.load', 'np.load', ({(23, 25, 23, 48): 'args.expert_policy_file'}, {}), '(args.expert_policy_file)', True, 'import numpy as np\n'), ((31, 10, 31, 32), 'gym.make', 'gym.make', ({(31, 19, 31, 31): 'args.envname'}, {}), '(args.envname)', False, 'import gym\n'), ((59, 25, 59, 41), 'numpy.mean', 'np.mean', ({(59, 33, 59, 40): 'returns'}, {}), '(returns)', True, 'import numpy as np\n'), ((60, 27, 60, 42), 'numpy.std', 'np.std', ({(60, 34, 60, 41): 'returns'}, {}), '(returns)', True, 'import numpy as np\n'), ((43, 21, 43, 48), 'numpy.dot', 'np.dot', ({(43, 28, 43, 29): 'M', (43, 31, 43, 47): '(obs - mean) / std'}, {}), '(M, (obs - mean) / std)', True, 'import numpy as np\n')] |
TreshUp/poliastro | src/poliastro/plotting/tisserand.py | 602eb3c39d315be6dc1edaa12d72ab0e361334f6 | """ Generates Tisserand plots """
from enum import Enum
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from poliastro.plotting._base import BODY_COLORS
from poliastro.twobody.mean_elements import get_mean_elements
from poliastro.util import norm
class TisserandKind(Enum):
"""All possible Tisserand kinds"""
APSIS = "apsis"
ENERGY = "energy"
PERIOD = "period"
class TisserandPlotter:
"""Generates Tisserand figures"""
def __init__(self, kind=TisserandKind.APSIS, axes=None):
"""Object initializer
Parameters
----------
kind : TisserandKind
Nature for the Tisserand
axes : ~matplotlib.pyplot.axes
Axes for the figure
"""
        # Assign Tisserand kind
self.kind = kind
# Check if axis available
if not axes:
_, self.ax = plt.subplots(1, 1)
else:
self.ax = axes
# Force axes scale regarding Tisserand kind
self.ax.set_xscale("log")
if self.kind == TisserandKind.APSIS:
self.ax.set_yscale("log")
def _solve_tisserand(
self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100
):
"""Solves all possible Tisserand lines with a meshgrid workflow
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf_array : ~astropy.units.Quantity
Desired Vinf for the flyby
num_contours : int
Number of contour lines for flyby speed
alpha_lim : tuple
Minimum and maximum flyby angles.
N : int
Number of points for flyby angle.
Notes
-----
The algorithm for generating Tisserand plots is the one depicted in
"Preliminary Trajectory Design of a Mission to Enceladus" by David
Falcato Fialho Palma, section 3.6
"""
# Generate mean orbital elements Earth
body_rv = get_mean_elements(body).to_vectors()
R_body, V_body = norm(body_rv.r), norm(body_rv.v)
# Generate non-dimensional velocity and alpha span
vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours)
alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N)
vinf_array /= V_body
# Construct the mesh for any configuration
V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array)
# Solving for non-dimensional a_sc and ecc_sc
A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA))
ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2)
# Compute main Tisserand variables
RR_P = A_SC * R_body * (1 - ECC_SC)
RR_A = A_SC * R_body * (1 + ECC_SC)
TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k)
EE = -body.parent.k / (2 * A_SC * R_body)
# Build color lines to internal canvas
return RR_P, RR_A, EE, TT
def _build_lines(self, RR_P, RR_A, EE, TT, color):
"""Collect lines and append them to internal data
Parameters
----------
        RR_P, RR_A : ~astropy.units.Quantity
            Radii of periapsis and apoapsis for each Tisserand line
        EE, TT : ~astropy.units.Quantity
            Specific orbital energies and periods for each Tisserand line
        color : str
            String representing the color of the lines
Returns
-------
lines: list
Plotting lines for the Tisserand
"""
# Plot desired kind lines
if self.kind == TisserandKind.APSIS:
# Generate apsis lines
lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color)
elif self.kind == TisserandKind.ENERGY:
# Generate energy lines
lines = self.ax.plot(
RR_P.to(u.AU), EE.to(u.km ** 2 / u.s ** 2), color=color
)
elif self.kind == TisserandKind.PERIOD:
# Generate period lines
lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color)
return lines
def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None):
"""Plots body Tisserand line within flyby angle
Parameters
----------
body : ~poliastro.bodies.Body
            Body for which the Tisserand line is plotted
vinf : ~astropy.units.Quantity
Vinf velocity line
alpha_lim : tuple
Minimum and maximum flyby angles
color : str
            String representing the color of the lines
Returns
-------
self.ax: ~matplotlib.axes.Axes
Apsis tisserand is the default plotting option
"""
# HACK: to reuse Tisserand solver, we transform input Vinf into a tuple
vinf_span = (vinf, vinf)
# Solve Tisserand parameters
RR_P, RR_A, EE, TT = self._solve_tisserand(
body, vinf_span, num_contours=2, alpha_lim=alpha_lim
)
# Check if color defined
if not color:
color = BODY_COLORS[body.name]
# Build canvas lines from Tisserand parameters
self._build_lines(RR_P, RR_A, EE, TT, color)
return self.ax
def plot(self, body, vinf_span, num_contours=10, color=None):
"""Plots body Tisserand for given amount of solutions within Vinf span
Parameters
----------
body : ~poliastro.bodies.Body
            Body for which the Tisserand lines are plotted
vinf_span : tuple
Minimum and maximum Vinf velocities
num_contours : int
Number of points to iterate over previously defined velocities
color : str
            String representing the color of the lines
Returns
-------
self.ax: ~matplotlib.axes.Axes
Apsis tisserand is the default plotting option
"""
# Solve Tisserand parameters
RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)
# Check if color defined
if not color:
color = BODY_COLORS[body.name]
# Build canvas lines from Tisserand parameters
self._build_lines(RR_P, RR_A, EE, TT, color)
return self.ax
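
# Minimal usage sketch (added for illustration, not part of the original
# module); it assumes poliastro's Earth body is available and picks an
# arbitrary flyby-velocity span.
if __name__ == "__main__":
    from poliastro.bodies import Earth

    tp = TisserandPlotter(kind=TisserandKind.PERIOD)
    ax = tp.plot(Earth, (5, 10) * u.km / u.s, num_contours=10)
    plt.show()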
| [((81, 21, 81, 75), 'numpy.linspace', 'np.linspace', ({(81, 33, 81, 45): 'vinf_span[0]', (81, 47, 81, 60): 'vinf_span[-1]', (81, 62, 81, 74): 'num_contours'}, {}), '(vinf_span[0], vinf_span[-1], num_contours)', True, 'import numpy as np\n'), ((82, 22, 82, 65), 'numpy.linspace', 'np.linspace', ({(82, 34, 82, 46): 'alpha_lim[0]', (82, 48, 82, 61): 'alpha_lim[-1]', (82, 63, 82, 64): 'N'}, {}), '(alpha_lim[0], alpha_lim[-1], N)', True, 'import numpy as np\n'), ((86, 23, 86, 59), 'numpy.meshgrid', 'np.meshgrid', ({(86, 35, 86, 45): 'vinf_array', (86, 47, 86, 58): 'alpha_array'}, {}), '(vinf_array, alpha_array)', True, 'import numpy as np\n'), ((90, 17, 90, 81), 'numpy.sqrt', 'np.sqrt', ({(90, 25, 90, 80): '1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / 2) ** 2'}, {}), '(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / 2) ** 2)', True, 'import numpy as np\n'), ((41, 25, 41, 43), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(41, 38, 41, 39): '1', (41, 41, 41, 42): '1'}, {}), '(1, 1)', True, 'from matplotlib import pyplot as plt\n'), ((78, 25, 78, 40), 'poliastro.util.norm', 'norm', ({(78, 30, 78, 39): 'body_rv.r'}, {}), '(body_rv.r)', False, 'from poliastro.util import norm\n'), ((78, 42, 78, 57), 'poliastro.util.norm', 'norm', ({(78, 47, 78, 56): 'body_rv.v'}, {}), '(body_rv.v)', False, 'from poliastro.util import norm\n'), ((95, 25, 95, 70), 'numpy.sqrt', 'np.sqrt', ({(95, 33, 95, 69): '((A_SC * R_body) ** 3 / body.parent.k)'}, {}), '((A_SC * R_body) ** 3 / body.parent.k)', True, 'import numpy as np\n'), ((77, 18, 77, 41), 'poliastro.twobody.mean_elements.get_mean_elements', 'get_mean_elements', ({(77, 36, 77, 40): 'body'}, {}), '(body)', False, 'from poliastro.twobody.mean_elements import get_mean_elements\n'), ((89, 55, 89, 68), 'numpy.cos', 'np.cos', ({(89, 62, 89, 67): 'ALPHA'}, {}), '(ALPHA)', True, 'import numpy as np\n')] |
jlin/inventory | libs/Rack.py | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | from KeyValueTree import KeyValueTree
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import KeyValue as KeyValue
from django.test.client import RequestFactory
from api_v2.keyvalue_handler import KeyValueHandler
import json
factory = RequestFactory()
class Rack:
rack_name = None
tree = None
kv = None
ru = None
width = None
systems = []
ethernet_patch_panel_24 = []
ethernet_patch_panel_48 = []
def __init__(self, rack_name):
self.systems = []
self.rack_name = rack_name
self.kv = Truth.objects.select_related('truth_key_value').get(name=self.rack_name)
self.system_list = KeyValue.objects.select_related('system').filter(value__contains="truth:%s" % (self.rack_name))
self.ethernet_patch_panel_24 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 24)
self.ethernet_patch_panel_48 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 48)
        h = KeyValueHandler()
for s in self.system_list:
request = factory.get('/api/v2/keyvalue/?keystore=%s' % (s.system.hostname), follow=True)
tree = h.read(request)
system_ru = self._get_system_ru(tree)
system_image = self._get_system_image(tree)
system_slot = self._get_system_slot(tree)
self.systems.append({
"system_name":s.system.hostname,
"system_id":s.system.id,
"system_ru":system_ru,
"system_image":system_image,
'system_slot':system_slot,
'operating_system':str(s.system.operating_system),
'server_model': str(s.system.server_model),
'oob_ip': str(s.system.oob_ip),
})
self.systems = sorted(self.systems, key=lambda k: k['system_slot'])
try:
self.ru = self.kv.keyvalue_set.get(key='rack_ru').value
except:
self.ru = 42
try:
self.width = self.kv.keyvalue_set.get(key='rack_width').value
except:
self.width = 30
def _get_ethernet_patch_panels(self, tree, type, port_count):
ret = []
for i in tree.keyvalue_set.all():
match_string = "%i_port_%s_patch_panel" % (port_count, type)
if str(i.key) == match_string:
ret.append(i.value)
return ret
def _get_system_ru(self, tree):
for i in tree.iterkeys():
try:
if 'system_ru' in i.split(':'):
return tree[i]
except:
pass
return 4
def _get_system_image(self, tree):
for i in tree.iterkeys():
try:
if 'system_image' in i.split(':'):
return tree[i]
except:
pass
return None
def _get_system_slot(self, tree):
for i in tree.iterkeys():
try:
if 'system_slot' in i.split(':'):
return tree[i]
except:
pass
return 1
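
# Minimal usage sketch (added for illustration, not part of the original
# module); it assumes a configured Django environment and a truth entry whose
# name is "rack1", which is a made-up value.
if __name__ == "__main__":
    rack = Rack("rack1")
    for system in rack.systems:
        print(system["system_name"], system["system_slot"])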
| [((8, 10, 8, 26), 'django.test.client.RequestFactory', 'RequestFactory', ({}, {}), '()', False, 'from django.test.client import RequestFactory\n'), ((28, 12, 28, 29), 'api_v2.keyvalue_handler.KeyValueHandler', 'KeyValueHandler', ({}, {}), '()', False, 'from api_v2.keyvalue_handler import KeyValueHandler\n'), ((23, 18, 23, 65), 'truth.models.Truth.objects.select_related', 'Truth.objects.select_related', ({(23, 47, 23, 64): '"""truth_key_value"""'}, {}), "('truth_key_value')", False, 'from truth.models import KeyValue as TruthKeyValue, Truth\n'), ((24, 27, 24, 68), 'systems.models.KeyValue.objects.select_related', 'KeyValue.objects.select_related', ({(24, 59, 24, 67): '"""system"""'}, {}), "('system')", True, 'from systems.models import KeyValue as KeyValue\n')] |
returntocorp/inputset-generator | r2c_isg/functions/__init__.py | c33952cc5683e9e70b24f76936c42ec8e354d121 | from .trim import trim
from .sample import sample
from .sort import sort
function_map = {
'trim': trim,
'sample': sample,
'sort': sort
}
| [] |
csalyk/nirspec | __init__.py | 58661371871d29103afe42bfccc0bff9ff773914 | from .nirspec import divspec
from .nirspec import gluespec
| [] |
xcollantes/poetry-generator | poem.py | 456c9702f0105b49b8c3edbb55043a10efbf359b | from __future__ import absolute_import
from __future__ import print_function
import datetime
import os
import random
import sys
import uuid
import base64
import yaml
import re
try:
import en
except:
print("DOWNLOD NODECUBE")
print("""wget https://www.nodebox.net/code/data/media/linguistics.zip
unzip linguistics.zip""")
VERSION = "1.1"
THEME_PROB = 0
class bnfDictionary:
def __init__(self, file):
self.grammar = yaml.load(open(file,'r'))
self.poemtype = "<poem>"
def generate(self, key, num):
gram = self.grammar[key]
if len(gram)==1:
i = 0
else:
i = random.randint(0, len(gram) - 1)
string = ""
if "<" not in gram[i]:
string = gram[i]
else:
for word in gram[i].split():
if "<" not in word:
string = string + word + " "
else:
if "verb" in word and word != '<adverb>':
if "pverb" in word or "mushy" in self.poemtype:
v = self.generate("<pverb>", 1).strip()
elif "nverb" in word:
v = self.generate("<nverb>", 1).strip()
# else:
# v = self.generate("<verb>", 1).strip()
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-verb>", 1).strip()
if "verb-inf" in word:
string = string + \
en.verb.present_participle(v) + " "
elif "verb-pr" in word:
string = string + \
en.verb.present(
v, person=3, negate=False) + " "
elif "verb-past" in word:
string = string + en.verb.past(v) + " "
else:
string = string + v + " "
elif "noun" in word:
if "pnoun" in word or "mushy" in self.poemtype:
v = self.generate("<pnoun>", 1).strip()
elif "nnoun" in word:
v = self.generate("<nnoun>", 1).strip()
else:
v = self.generate("<noun>", 1).strip()
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-noun>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + v + " "
elif "person" in word:
v = self.generate("<person>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + v + " "
elif "adj" in word:
if "mushy" in self.poemtype:
v = self.generate("<padj>",1)
else:
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-adj>", 1).strip()
else:
v = self.generate(word, 1).strip()
string = string + v + " "
elif "fruit" in word:
v = self.generate("<fruit>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + self.generate(word, 1) + " "
elif "person" in word:
v = self.generate("<fruit>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + self.generate(word, 1) + " "
else:
if "-pl" in word:
v = en.noun.plural(self.generate(word.replace("-pl",""),1))
else:
v = self.generate(word, 1)
string = string + v + " "
return string
def generatePretty(self, key, seed_str):
if seed_str == None:
seed_str = str(uuid.uuid4()).split("-")[0]
random.seed(uuid.uuid5(uuid.NAMESPACE_DNS,seed_str).int)
#tool = language_check.LanguageTool('en-US')
self.poemtype = key
if key == "<mushypoem>":
key = "<poem>"
poem = self.generate(key, 1)
poem = poem.replace(" ,", ",")
puncuation = [".", ".", ".", ".", "!", "?"]
dontbreaks = ["of", "behind", "the", "when", "what", "why", "who", ",",
"your", "by", "like", "to", "you", "your", "a", "are", "become", "newline"]
capitalize = False
breaks = 0
poem2 = []
foundFirstBreak = False
for word in poem.replace("\n", "newline").split():
poem2.append(word.lower())
if random.randint(1, 100) < 2 and "newline" not in word and foundFirstBreak:
isgood = True
for dontbreak in list(dontbreaks + puncuation):
if dontbreak == word.lower():
isgood = False
if isgood:
poem2.append("newline")
if "newline" in word:
foundFirstBreak = True
poem3 = []
beforeFirstBreak = True
for word in poem2:
if "newline" in word:
breaks += 1
beforeFirstBreak = False
else:
breaks = 0
if beforeFirstBreak or word == "i" or "i'" in word:
word = word.capitalize()
poem3.append(word)
capitalize = False
else:
if breaks > 1:
capitalize = True
if capitalize == True and "newline" not in word:
word = word.capitalize()
capitalize = False
for punc in list(set(puncuation)):
if punc in word:
capitalize = True
poem3.append(word)
if random.randint(1, 100) < 0 and "newline" not in word:
isgood = True
for dontbreak in list(dontbreaks + puncuation):
if dontbreak == word.lower():
isgood = False
if isgood:
poem3.append(random.choice(puncuation))
capitalize = True
# noPunc = True
# for punc in list(set(puncuation)):
# if punc in word:
# noPunc = False
# if noPunc:
# poem3.append(random.choice(puncuation))
newPoem = " ".join(poem3)
newPoem = newPoem.replace(" a a", " an a")
newPoem = newPoem.replace("newline .", ". newline")
newPoem = newPoem.replace("newline ?", "? newline")
newPoem = newPoem.replace("newline !", "! newline")
newPoem = newPoem.replace("newline ,", ", newline")
newPoem = newPoem.replace("newline", "\n")
newPoem = newPoem.replace(" \n \n", "\n\n")
newPoem = newPoem.replace("\n \n ", "\n\n")
newPoem = newPoem.replace(" '", "'")
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
newPoem = newPoem.replace(" ,", ",")
newPoem = newPoem.replace("?.", "?")
newPoem = newPoem.replace(".?", ".")
newPoem = newPoem.replace(",.", ",")
newPoem = newPoem.replace("!.", "!")
newPoem = newPoem.replace("..", ".")
newPoem = newPoem.replace("..", ".")
newPoem = newPoem.replace("..", ".")
title = newPoem.split("\n")[0]
newTitle = title.replace(".", "")
newPoem = newPoem.replace(title, "<h1>" + newTitle + "</h1>")
newPoem2 = ""
firstLine = False
secondLine = False
for line in newPoem.split("\n"):
if len(line) > 0:
if firstLine and not secondLine:
newPoem2 = newPoem2 + "<p>\n"
secondLine = True
if firstLine == False:
firstLine = True
newPoem2 = newPoem2 + line + " \n"
if firstLine and secondLine:
newPoem2 = newPoem2 + line + " <br />\n"
else:
newPoem2 = newPoem2 + " <br />\n"
newPoem2 = newPoem2 + "</p>"
return newPoem2,seed_str
bnf = bnfDictionary('brain.yaml')
def generate_poem(poemtype, hex_seed=None):
p,seed_str = bnf.generatePretty('<' + poemtype + '>',hex_seed)
return p,seed_str
if __name__ == '__main__':
poemtype = 'poem'
if 'mushy' in sys.argv[1:]:
poemtype = 'mushypoem'
p,seed_str=generate_poem(poemtype)
print(("*"*30 + "\n"*5))
filtered = []
for line in re.sub("<.*?>", " ", p).split("\n"):
if len(line.strip()) > 0:
filtered.append(line.strip())
else:
filtered.append("pause")
print(p)
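
# Illustrative sketch of the grammar file (an assumption, not the real
# brain.yaml): each key maps to a list of productions, and tokens in angle
# brackets are expanded recursively by bnfDictionary.generate(), e.g.
#     "<poem>":
#       - "<padj> <pnoun-pl> <pverb-pr> like the <pnoun>"
#     "<padj>":
#       - "tender"
#     "<pnoun>":
#       - "moonlight"
#     "<pverb>":
#       - "shimmer"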
| [((235, 16, 235, 39), 're.sub', 're.sub', ({(235, 23, 235, 30): '"""<.*?>"""', (235, 32, 235, 35): '""" """', (235, 37, 235, 38): 'p'}, {}), "('<.*?>', ' ', p)", False, 'import re\n'), ((112, 20, 112, 59), 'uuid.uuid5', 'uuid.uuid5', ({(112, 31, 112, 49): 'uuid.NAMESPACE_DNS', (112, 50, 112, 58): 'seed_str'}, {}), '(uuid.NAMESPACE_DNS, seed_str)', False, 'import uuid\n'), ((128, 15, 128, 37), 'random.randint', 'random.randint', ({(128, 30, 128, 31): '(1)', (128, 33, 128, 36): '(100)'}, {}), '(1, 100)', False, 'import random\n'), ((160, 19, 160, 41), 'random.randint', 'random.randint', ({(160, 34, 160, 35): '(1)', (160, 37, 160, 40): '(100)'}, {}), '(1, 100)', False, 'import random\n'), ((51, 27, 51, 49), 'random.randint', 'random.randint', ({(51, 42, 51, 43): '(1)', (51, 45, 51, 48): '(100)'}, {}), '(1, 100)', False, 'import random\n'), ((110, 27, 110, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((166, 37, 166, 62), 'random.choice', 'random.choice', ({(166, 51, 166, 61): 'puncuation'}, {}), '(puncuation)', False, 'import random\n'), ((71, 27, 71, 49), 'random.randint', 'random.randint', ({(71, 42, 71, 43): '(1)', (71, 45, 71, 48): '(100)'}, {}), '(1, 100)', False, 'import random\n'), ((74, 32, 74, 49), 'en.noun.plural', 'en.noun.plural', ({(74, 47, 74, 48): 'v'}, {}), '(v)', False, 'import en\n'), ((55, 32, 55, 61), 'en.verb.present_participle', 'en.verb.present_participle', ({(55, 59, 55, 60): 'v'}, {}), '(v)', False, 'import en\n'), ((79, 32, 79, 49), 'en.noun.plural', 'en.noun.plural', ({(79, 47, 79, 48): 'v'}, {}), '(v)', False, 'import en\n'), ((58, 32, 59, 62), 'en.verb.present', 'en.verb.present', (), '', False, 'import en\n'), ((61, 46, 61, 61), 'en.verb.past', 'en.verb.past', ({(61, 59, 61, 60): 'v'}, {}), '(v)', False, 'import en\n'), ((85, 31, 85, 53), 'random.randint', 'random.randint', ({(85, 46, 85, 47): '(1)', (85, 49, 85, 52): '(100)'}, {}), '(1, 100)', False, 'import random\n'), ((93, 32, 93, 49), 'en.noun.plural', 'en.noun.plural', ({(93, 47, 93, 48): 'v'}, {}), '(v)', False, 'import en\n'), ((98, 32, 98, 49), 'en.noun.plural', 'en.noun.plural', ({(98, 47, 98, 48): 'v'}, {}), '(v)', False, 'import en\n')] |
infonova/openstacksdk | openstack/tests/unit/block_storage/v2/test_proxy.py | 3cf6730a71d8fb448f24af8a5b4e82f2af749cea | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.block_storage.v2 import _proxy
from openstack.block_storage.v2 import snapshot
from openstack.block_storage.v2 import stats
from openstack.block_storage.v2 import type
from openstack.block_storage.v2 import volume
from openstack.tests.unit import test_proxy_base
class TestVolumeProxy(test_proxy_base.TestProxyBase):
def setUp(self):
super(TestVolumeProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_snapshot_get(self):
self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot)
def test_snapshots_detailed(self):
self.verify_list(self.proxy.snapshots, snapshot.SnapshotDetail,
paginated=True,
method_kwargs={"details": True, "query": 1},
expected_kwargs={"query": 1})
def test_snapshots_not_detailed(self):
self.verify_list(self.proxy.snapshots, snapshot.Snapshot,
paginated=True,
method_kwargs={"details": False, "query": 1},
expected_kwargs={"query": 1})
def test_snapshot_create_attrs(self):
self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot)
def test_snapshot_delete(self):
self.verify_delete(self.proxy.delete_snapshot,
snapshot.Snapshot, False)
def test_snapshot_delete_ignore(self):
self.verify_delete(self.proxy.delete_snapshot,
snapshot.Snapshot, True)
def test_type_get(self):
self.verify_get(self.proxy.get_type, type.Type)
def test_types(self):
self.verify_list(self.proxy.types, type.Type, paginated=False)
def test_type_create_attrs(self):
self.verify_create(self.proxy.create_type, type.Type)
def test_type_delete(self):
self.verify_delete(self.proxy.delete_type, type.Type, False)
def test_type_delete_ignore(self):
self.verify_delete(self.proxy.delete_type, type.Type, True)
def test_volume_get(self):
self.verify_get(self.proxy.get_volume, volume.Volume)
def test_volumes_detailed(self):
self.verify_list(self.proxy.volumes, volume.VolumeDetail,
paginated=True,
method_kwargs={"details": True, "query": 1},
expected_kwargs={"query": 1})
def test_volumes_not_detailed(self):
self.verify_list(self.proxy.volumes, volume.Volume,
paginated=True,
method_kwargs={"details": False, "query": 1},
expected_kwargs={"query": 1})
def test_volume_create_attrs(self):
self.verify_create(self.proxy.create_volume, volume.Volume)
def test_volume_delete(self):
self.verify_delete(self.proxy.delete_volume, volume.Volume, False)
def test_volume_delete_ignore(self):
self.verify_delete(self.proxy.delete_volume, volume.Volume, True)
def test_volume_extend(self):
self._verify("openstack.block_storage.v2.volume.Volume.extend",
self.proxy.extend_volume,
method_args=["value", "new-size"],
expected_args=["new-size"])
def test_backend_pools(self):
self.verify_list(self.proxy.backend_pools, stats.Pools,
paginated=False)
| [((24, 21, 24, 47), 'openstack.block_storage.v2._proxy.Proxy', '_proxy.Proxy', ({(24, 34, 24, 46): 'self.session'}, {}), '(self.session)', False, 'from openstack.block_storage.v2 import _proxy\n')] |
starofrainnight/ncstyler | src/ncstyler/console.py | d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb | #!/usr/bin/env python
import argparse
import CppHeaderParser
import re
import sys
import yaml
import copy
import six
import os.path
import traceback
class CppDefine(dict):
def __init__(self):
self["name"] = None
self["parameters"] = []
self["line_number"] = -1
class CppDefineParameter(dict):
def __init__(self):
self["name"] = None
self["line_number"] = -1
class CppNamespace(dict):
def __init__(self):
self["name"] = None
self["line_number"] = -1
class CppFileName(dict):
def __init__(self):
self["name"] = None
self["line_number"] = -1
class Application(object):
def __init__(self):
        description = '''A styler that targets only the naming conventions of source code'''
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-c", "--config",
help="Configuration file path (In YAML format)",
required=True)
parser.add_argument("-o", "--output", help="Output file path")
parser.add_argument("-d", "--debug", action='store_true', help="Print trace stack")
parser.add_argument("file_path", help="Source file path")
self.__args = parser.parse_args()
# If user does not specific output path, we default it to input file
# path
if self.__args.output is None:
self.__args.output = self.__args.file_path
self.__config = yaml.load(open(self.__args.config))
old_base = self.__config["_base_"]
self.__config["_base_"] = {
"re":"[a-zA-Z0-9_]+",
"error": "",
}
self.__config["_base_"].update(old_base)
def parse_define(self, adefine):
matched = re.match(r"[^\w]*(\w+)(?:\(([^\)]*)\)|\s*).*", adefine)
name = matched.group(1)
parameters = []
if matched.group(2) is not None:
parameter_names = matched.group(2).split(',')
for parameter_name in parameter_names:
aparameter = CppDefineParameter()
aparameter["name"] = parameter_name.strip()
parameters.append(aparameter)
result = CppDefine()
result["name"] = name
result["parameters"] = parameters
return result
def _is_special_method(self, amethod):
if isinstance(amethod, six.string_types):
amethod_name = amethod
else:
amethod_name = amethod["name"]
founded = re.findall(r"(?:^|[^\w]+)operator[^\w]+", amethod_name)
if len(founded) <= 0:
if re.match(r"(?:^|.*\W)operator\W.*", amethod["debug"]) is not None:
return True
return False
return True
def _get_argument_name(self, an_argument):
if isinstance(an_argument, six.string_types):
return an_argument
if len(an_argument["name"]) > 0:
return an_argument["name"]
# If it's a functor?? with "class name::function" style
matched = re.match(r"^\w+\s*\(\w*::\*(\w+)\)\(.*$", an_argument["type"])
if matched is None:
# with normal "function" style
matched = re.match(r"[^\(]*\([^\)]*\W(\w+)\W.*\).*", an_argument["type"])
if matched is None:
return ""
else:
return matched.group(1)
def _get_config(self, name):
override_table = {
"class": "_base_",
"function": "_base_",
"variant": "_base_",
"namespace": "_base_",
"define": "_base_",
"filename": "_base_", # Special config use to define filename rule
"argument": "variant",
"static_variant": "variant",
"global_variant": "variant",
"function_argument": "argument",
"class_method_argument": "function_argument",
"struct_method_argument": "class_method_argument",
"define_function_argument": "function_argument",
"define_function": "function",
"class_method": "function",
"struct_method": "class_method",
"class_variant": "variant",
"struct_variant": "class_variant",
"typedef": "class",
"struct": "class",
"enum": "class",
"enum_value": "define",
"union": "struct",
}
my_config = dict()
if name in override_table:
base_name = override_table[name]
my_config.update(self._get_config(base_name))
if name in self.__config:
my_config.update(self.__config[name])
return my_config
def _is_valid_variable(self, cpp_variable):
if cpp_variable["type"] == "return":
return False
if len(cpp_variable["type"]) <= 0:
return False
return True
def _get_cpp_method_re(self, name):
prefix = "operator"
if not name.startswith(prefix):
return re.escape(name)
# Operator methods
chars = []
for achar in name[len(prefix):]:
chars.append("\\s*")
if achar.isalnum():
chars.append(achar)
else:
chars.append("\\")
chars.append(achar)
return "operator%s" % ''.join(chars)
def _validate_codes_of_cpp_method(self, cpp_method):
start_line_index = cpp_method["line_number"] - 1
# Extract cpp method codes
rest_lines = self._source_lines[start_line_index:]
content = '\n'.join(rest_lines)
code_lines = []
name_re = self._get_cpp_method_re(cpp_method["name"])
name_start_pos = re.search(name_re, content).span()[0]
parameters_start_pos = content.index('(', name_start_pos)
parameters_stop_pos = content.index(')', parameters_start_pos)
stack = []
try:
i = content.index('{', parameters_stop_pos + 1)
except ValueError:
return;
try:
semicolonPos = content.index(';', parameters_stop_pos + 1)
if semicolonPos <= i:
return;
except ValueError:
# Not found a semicolon, just ignored.
pass
skipped_lines = cpp_method["line_number"] + content.count("\n", 0, i) - 2
stack.append(i)
i += 1
first_i = i
last_i = 0
is_finding_block_comment = False
is_finding_single_comment = False
while (len(stack) > 0) and (i < len(content)):
c = content[i]
if is_finding_block_comment:
# If finding block comment, then skip all other searching
if (c == "*") and (content[i + 1] == "/"):
is_finding_block_comment = False
elif (c == "/") and (content[i + 1] == "*"):
is_finding_block_comment = True
elif is_finding_single_comment:
# If finding single comment, then skip all other searching
if c == "\n":
is_finding_single_comment = False
elif (c == "/") and (content[i + 1] == "/"):
is_finding_single_comment = True
elif c == "{":
stack.append(i)
elif c == "}":
last_i = i
del stack[len(stack) - 1]
i += 1
if len(stack) <= 0:
content = content[first_i:last_i]
founded = re.findall(r"\w+\W+(\w+)\s*=[^=]", content)
for aname in founded:
avariant = dict()
avariant["name"] = aname
avariant["line_number"] = cpp_method["line_number"]
self._validate_name(avariant, "variant")
def _validate_name(self, cpp_object, name_re):
cpp_object_name = ""
if isinstance(cpp_object, six.string_types):
cpp_object_name = cpp_object
cpp_object = dict()
cpp_object["name"] = cpp_object_name
cpp_object["line_number"] = -1
elif "name" in cpp_object:
cpp_object_name = cpp_object["name"]
if ('<' in cpp_object_name) and ("debug" in cpp_object):
matched = re.match(r".*?(\w+)\W+$", cpp_object["debug"])
if matched is not None:
cpp_object_name = matched.group(1)
else:
return
# Parse union like names
splitted = cpp_object_name.split()
if len(splitted) > 1:
cpp_object_name = splitted[-1]
if '...' in cpp_object_name:
# Does not have valid name, we must not check it .
return
if len(cpp_object_name) <= 0:
# Does not have valid name, we must not check it .
return
matched = re.match(self._get_config(name_re)["re"], cpp_object_name)
if matched is None:
filename = os.path.basename(self.__args.file_path)
error_message = self._get_config(name_re)["error"]
if len(error_message) > 0:
error_message = "%s %s" % (
' '.join([rule_name.capitalize() for rule_name in name_re.split("_")]),
error_message)
if self.__args.debug:
traceback.print_stack()
raise SyntaxError("%s:%s:error: Name '%s' isn't matched with rule : %s! %s" % (
filename,
cpp_object["line_number"],
cpp_object_name,
name_re,
error_message))
def _get_class_realname(self, class_name):
return re.match(r"(\w+).*", class_name).group(1)
def _validate_cpp_object(self, cpp_object):
cpp_object_type = type(cpp_object)
if cpp_object_type == CppDefine:
if len(cpp_object["parameters"]) <= 0:
# Normal Define Name
self._validate_name(cpp_object, "define")
else:
# Function Liked Define Name
self._validate_name(cpp_object, "define_function")
for aparameter in cpp_object["parameters"]:
self._validate_name(aparameter, "define_function_argument")
elif cpp_object_type == CppHeaderParser.CppClass:
if "struct" in cpp_object["declaration_method"]:
class_re = "struct"
class_method_re = "struct_method"
class_method_argument_re = "struct_method_argument"
class_variant_re = "struct_variant"
else:
class_re = "class"
class_method_re = "class_method"
class_method_argument_re = "class_method_argument"
class_variant_re = "class_variant"
self._validate_name(cpp_object, class_re)
for amethod in cpp_object.get_all_methods():
matched = re.match(r".*typedef\W[^\(]*\([^\)]*\W(\w+)\W.*\).*", amethod["debug"])
if matched is None:
self._validate_codes_of_cpp_method(amethod)
if not self._is_special_method(amethod):
if ((amethod["name"] != self._get_class_realname(cpp_object["name"]))
and (not amethod.get("constructor", False))
and (not amethod.get("destructor", False))):
try:
self._validate_name(amethod, class_method_re)
except SyntaxError:
is_need_reraise = True
try:
self._validate_name(amethod, "define_function")
is_need_reraise = False
except SyntaxError:
pass
if is_need_reraise:
raise
for aparameter in amethod["parameters"]:
an_object = dict()
an_object["line_number"] = aparameter["line_number"]
if (aparameter["type"].endswith("::*")
and (")" in aparameter["name"])):
an_object["name"] = re.match(r"(\w+).*", aparameter["name"]).group(1)
try:
self._validate_name(an_object,
class_method_re)
except SyntaxError:
is_need_reraise = True
try:
self._validate_name(amethod, "define_function")
is_need_reraise = False
except SyntaxError:
pass
if is_need_reraise:
raise
else:
an_object["name"] = self._get_argument_name(aparameter)
self._validate_name(an_object,
class_method_argument_re)
else:
self._validate_name(
{"name":matched.group(1), "line_number":amethod["line_number"]},
"typedef")
for access_specifier in CppHeaderParser.supportedAccessSpecifier:
for amember in cpp_object["properties"][access_specifier]:
is_skip_validate = False
if ("type" in amember) and (amember["type"] is not None):
internal_predeclares = ["class", "struct", "union"]
if amember["type"] in internal_predeclares:
is_skip_validate = True
if not is_skip_validate:
if amember["static"]:
self._validate_name(amember, "static_variant")
else:
self._validate_name(amember, class_variant_re)
for amember in cpp_object["structs"][access_specifier]:
self._validate_cpp_object(amember)
for amember in cpp_object["enums"][access_specifier]:
self._validate_cpp_object(amember)
elif cpp_object_type == CppHeaderParser.CppStruct:
self._validate_name(cpp_object, "struct")
elif cpp_object_type == CppHeaderParser.CppEnum:
self._validate_name(cpp_object, "enum")
line_number = -1
if "line_number" in cpp_object:
line_number = cpp_object["line_number"]
for amember in cpp_object["values"]:
# Use parent line number if enum value does not have it's line
# number
if "line_number" not in amember:
amember["line_number"] = line_number
self._validate_name(amember, "enum_value")
elif cpp_object_type == CppHeaderParser.CppVariable:
if cpp_object["type"] != "return":
if cpp_object["static"]:
self._validate_name(cpp_object, "static_variant")
elif cpp_object["type"] not in ["class", "struct", "union"]:
if not cpp_object["type"].endswith("::"):
# Don't parse variable that implemented outside of
# template class. It's already be parsed when parsing
# the class.
self._validate_name(cpp_object, "global_variant")
elif cpp_object_type == CppHeaderParser.CppMethod:
# Exclude "main" function while parsing global function
while True:
# FIXME: Parse special case : "struct RArraySize <T ( & ) [ N ]> {"
if "debug" in cpp_object:
if re.match(r".*\>\s*{$", cpp_object["debug"]) is not None:
break
self._validate_codes_of_cpp_method(cpp_object)
if cpp_object["name"] == "main":
break
if self._is_special_method(cpp_object):
break
if (cpp_object["class"] is None) or (len(cpp_object["class"]) <= 0):
if ">" in cpp_object["name"]:
regex = r"^[^<:]*?(?:(\w+)::)?(\w+)\s*<"
matched = re.search(regex, cpp_object["debug"])
if matched.group(1) is not None:
cpp_object["class"] = matched.group(1)
cpp_object["name"] = matched.group(2)
self._validate_name(cpp_object, "class_method")
elif len(cpp_object["returns"]) > 0:
# If a function does not have return value(at least
# "void"), it maybe macro invokes.
# FIXME: We just ignored this situation:
# Code Snippets: static RSignal<void(int)> sReceived;
if "<" not in cpp_object["name"]:
self._validate_name(cpp_object, "function")
break
if self._get_class_realname(cpp_object["class"]) == cpp_object["name"]:
# Constructor / Destructor will the same with class name
break
self._validate_name(cpp_object, "class_method")
break
elif cpp_object_type == CppHeaderParser.CppUnion:
self._validate_name(cpp_object, "union")
elif cpp_object_type == CppNamespace:
self._validate_name(cpp_object, "namespace")
elif cpp_object_type == CppFileName:
self._validate_name(cpp_object, "filename")
def exec_(self):
try:
with open(self.__args.file_path, "r") as source_file:
# For later parse by _validate_codes_of_cpp_method()
self._source_lines = source_file.readlines()
parsed_info = CppHeaderParser.CppHeader(self.__args.file_path)
# Verify File Names
filename = os.path.basename(self.__args.file_path)
cpp_object = CppFileName()
cpp_object["name"] = filename
self._validate_cpp_object(cpp_object)
# Verify Define Names
for define_text in parsed_info.defines:
self._validate_cpp_object(self.parse_define(define_text))
# Verify Function Names
for cpp_object in parsed_info.functions:
self._validate_cpp_object(cpp_object)
# Verify Class Names
for cpp_object in parsed_info.classes_order:
self._validate_cpp_object(cpp_object)
# Verify Struct Names
for cpp_object in parsed_info.structs_order:
self._validate_cpp_object(cpp_object)
# Verify Enum Names
for cpp_object in parsed_info.enums:
self._validate_cpp_object(cpp_object)
# Verify Variable Names
for cpp_object in parsed_info.variables:
# Avoid checking member variable inside function body.
if '{' not in cpp_object['type']:
self._validate_cpp_object(cpp_object)
for namespace in parsed_info.namespaces:
cpp_object = CppNamespace()
cpp_object["name"] = namespace
self._validate_cpp_object(cpp_object)
# Verify Typdef Names
for cpp_object in parsed_info.typedefs:
self._validate_cpp_object(cpp_object)
except SyntaxError as e:
print(str(e))
return 1
except CppHeaderParser.CppHeaderParser.CppParseError as e:
# CppHeaderParser can't parse this file, but we should pass it, this
# is the CppHeaderParser's problem.
print(str(e))
return 0
return 0
def main():
a = Application()
sys.exit(a.exec_())
if __name__ == "__main__":
# Execute only if run as a script
main()
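
# Illustrative configuration sketch (an assumption, not a file shipped with
# this script): every section supplies a regular expression plus an error
# hint, and unknown sections fall back to "_base_" via the override table in
# _get_config(), e.g.
#     _base_:
#       re: "[a-zA-Z0-9_]+"
#       error: "only letters, digits and underscores are allowed"
#     class:
#       re: "^[A-Z][a-zA-Z0-9]*$"
#       error: "must be written in CamelCase"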
| [((39, 17, 39, 65), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((63, 18, 63, 73), 're.match', 're.match', ({(63, 27, 63, 63): '"""[^\\\\w]*(\\\\w+)(?:\\\\(([^\\\\)]*)\\\\)|\\\\s*).*"""', (63, 65, 63, 72): 'adefine'}, {}), "('[^\\\\w]*(\\\\w+)(?:\\\\(([^\\\\)]*)\\\\)|\\\\s*).*', adefine)", False, 'import re\n'), ((84, 18, 84, 73), 're.findall', 're.findall', ({(84, 29, 84, 58): '"""(?:^|[^\\\\w]+)operator[^\\\\w]+"""', (84, 60, 84, 72): 'amethod_name'}, {}), "('(?:^|[^\\\\w]+)operator[^\\\\w]+', amethod_name)", False, 'import re\n'), ((101, 18, 101, 80), 're.match', 're.match', ({(101, 27, 101, 58): '"""^\\\\w+\\\\s*\\\\(\\\\w*::\\\\*(\\\\w+)\\\\)\\\\(.*$"""', (101, 60, 101, 79): "an_argument['type']"}, {}), "('^\\\\w+\\\\s*\\\\(\\\\w*::\\\\*(\\\\w+)\\\\)\\\\(.*$', an_argument['type'])", False, 'import re\n'), ((104, 22, 104, 85), 're.match', 're.match', ({(104, 31, 104, 63): '"""[^\\\\(]*\\\\([^\\\\)]*\\\\W(\\\\w+)\\\\W.*\\\\).*"""', (104, 65, 104, 84): "an_argument['type']"}, {}), "('[^\\\\(]*\\\\([^\\\\)]*\\\\W(\\\\w+)\\\\W.*\\\\).*', an_argument['type'])", False, 'import re\n'), ((162, 19, 162, 34), 're.escape', 're.escape', ({(162, 29, 162, 33): 'name'}, {}), '(name)', False, 'import re\n'), ((236, 22, 236, 65), 're.findall', 're.findall', ({(236, 33, 236, 55): '"""\\\\w+\\\\W+(\\\\w+)\\\\s*=[^=]"""', (236, 57, 236, 64): 'content'}, {}), "('\\\\w+\\\\W+(\\\\w+)\\\\s*=[^=]', content)", False, 'import re\n'), ((474, 26, 474, 74), 'CppHeaderParser.CppHeader', 'CppHeaderParser.CppHeader', ({(474, 52, 474, 73): 'self.__args.file_path'}, {}), '(self.__args.file_path)', False, 'import CppHeaderParser\n'), ((86, 15, 86, 68), 're.match', 're.match', ({(86, 24, 86, 49): '"""(?:^|.*\\\\W)operator\\\\W.*"""', (86, 51, 86, 67): "amethod['debug']"}, {}), "('(?:^|.*\\\\W)operator\\\\W.*', amethod['debug'])", False, 'import re\n'), ((282, 16, 282, 39), 'traceback.print_stack', 'traceback.print_stack', ({}, {}), '()', False, 'import traceback\n'), ((292, 15, 292, 47), 're.match', 're.match', ({(292, 24, 292, 34): '"""(\\\\w+).*"""', (292, 36, 292, 46): 'class_name'}, {}), "('(\\\\w+).*', class_name)", False, 'import re\n'), ((183, 25, 183, 52), 're.search', 're.search', ({(183, 35, 183, 42): 'name_re', (183, 44, 183, 51): 'content'}, {}), '(name_re, content)', False, 'import re\n'), ((253, 26, 253, 72), 're.match', 're.match', ({(253, 35, 253, 50): '""".*?(\\\\w+)\\\\W+$"""', (253, 52, 253, 71): "cpp_object['debug']"}, {}), "('.*?(\\\\w+)\\\\W+$', cpp_object['debug'])", False, 'import re\n'), ((320, 26, 320, 97), 're.match', 're.match', ({(320, 35, 320, 78): '""".*typedef\\\\W[^\\\\(]*\\\\([^\\\\)]*\\\\W(\\\\w+)\\\\W.*\\\\).*"""', (320, 80, 320, 96): "amethod['debug']"}, {}), "('.*typedef\\\\W[^\\\\(]*\\\\([^\\\\)]*\\\\W(\\\\w+)\\\\W.*\\\\).*', amethod['debug'])", False, 'import re\n'), ((345, 48, 345, 88), 're.match', 're.match', ({(345, 57, 345, 67): '"""(\\\\w+).*"""', (345, 69, 345, 87): "aparameter['name']"}, {}), "('(\\\\w+).*', aparameter['name'])", False, 'import re\n'), ((421, 23, 421, 66), 're.match', 're.match', ({(421, 32, 421, 44): '""".*\\\\>\\\\s*{$"""', (421, 46, 421, 65): "cpp_object['debug']"}, {}), "('.*\\\\>\\\\s*{$', cpp_object['debug'])", False, 'import re\n'), ((434, 34, 434, 71), 're.search', 're.search', ({(434, 44, 434, 49): 'regex', (434, 51, 434, 70): "cpp_object['debug']"}, {}), "(regex, cpp_object['debug'])", False, 'import re\n')] |
laozijiaojiangnan/TheOne | theone/wsgi/server.py | 73c1e7cee545c2eb2b2118f2dbf2d4d0c56e3824 | import typing as t
from http.server import HTTPServer, BaseHTTPRequestHandler
from . import response as resp
class WsgiServer(HTTPServer):
pass
class WsgiHandel(BaseHTTPRequestHandler):
def handle(self) -> None:
handle_response = SimpleHandler(self.wfile)
handle_response.send()
class SimpleHandler:
def __init__(self, wfile):
self._response = resp.Response.create_empty() # type: resp.Response
self.sender = wfile
def send(self):
"""像浏览器发送包
node: 下面分成了三次发送,因为合在发送会有 bug,不确定问题,暂时先这样
"""
line = f"{self._response.line.version} {self._response.line.code} {self._response.line.code}\r\n"
self.sender.write(bytes(line, 'utf-8'))
self.add_header(key='Content-Length', value=len(self._response.body.content))
headers = "".join(
[f"{h.key}:{h.value}\r\n" for h in self._response.headers]
)
print(f'headers: {headers}')
self.sender.write(bytes(headers, 'utf-8'))
body = f"\r\n{self._response.body.content}"
self.sender.write(bytes(body, 'utf-8'))
def add_header(self, key: str, value: t.Any) -> t.List[resp.Headers]:
"""添加请求头键值对
Args:
key: 键
value: 值
Return:
存在的所有键值对信息
"""
if self._response is None:
self._response = resp.Response.create_empty()
h = resp.Headers(key=key, value=value)
self._response.headers.append(h)
return self._response.headers
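
# Minimal usage sketch (added for illustration, not part of the original
# module); the address and port below are arbitrary choices.
if __name__ == "__main__":
    WsgiServer(("127.0.0.1", 8000), WsgiHandel).serve_forever()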
| [] |
dzhulgakov/translate | pytorch_translate/attention/multihead_attention.py | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | #!/usr/bin/env python3
from fairseq.modules import multihead_attention as fair_multihead
from pytorch_translate.attention import (
BaseAttention,
attention_utils,
register_attention,
)
@register_attention("multihead")
class MultiheadAttention(BaseAttention):
"""
Multiheaded Scaled Dot Product Attention
Implements equation:
MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O
where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)
Similarly to the above, d_k = d_v = d_model / h
In this implementation, keys and values are both set to encoder output
Inputs
init:
decoder_hidden_state_dim : dimensionality of decoder hidden state
context_dim : dimensionality of encoder output
kwargs :
nheads : integer # of attention heads
unseen_mask: if True, only attend to previous sequence positions
src_lengths_mask: if True, mask padding based on src_lengths
forward:
decoder_state : [batch size, d_model]
source_hids : [sequence length, batch size, d_model]
src_lengths : [batch size]
forward:
query : [sequence length, batch size, d_model]
key: [sequence length, batch size, d_model]
value: [sequence length, batch size, d_model]
Output
result : [batch_size, d_model]
"""
def __init__(
self,
decoder_hidden_state_dim,
context_dim,
*,
nheads=1,
unseen_mask=False,
src_length_mask=True
):
super().__init__(decoder_hidden_state_dim, context_dim)
assert decoder_hidden_state_dim == context_dim
d_model = decoder_hidden_state_dim # for brevity
assert d_model % nheads == 0
if unseen_mask:
raise NotImplementedError(
"Unseen mask not supported with sequential decoding"
)
self._fair_attn = fair_multihead.MultiheadAttention(d_model, nheads)
self.use_src_length_mask = src_length_mask
def forward(self, decoder_state, source_hids, src_lengths, squeeze=True):
"""
Computes MultiheadAttention with respect to either a vector
or a tensor
Inputs:
decoder_state: (bsz x decoder_hidden_state_dim) or
(bsz x T x decoder_hidden_state_dim)
source_hids: srclen x bsz x context_dim
src_lengths: bsz x 1, actual sequence lengths
squeeze: Whether or not to squeeze on the time dimension.
Even if decoder_state.dim() is 2 dimensional an
explicit time step dimension will be unsqueezed.
Outputs:
[batch_size, max_src_len] if decoder_state.dim() == 2 & squeeze
or
[batch_size, 1, max_src_len] if decoder_state.dim() == 2 & !squeeze
or
[batch_size, T, max_src_len] if decoder_state.dim() == 3 & !squeeze
or
[batch_size, T, max_src_len] if decoder_state.dim() == 3 & squeeze & T != 1
or
[batch_size, max_src_len] if decoder_state.dim() == 3 & squeeze & T == 1
"""
batch_size = decoder_state.shape[0]
if decoder_state.dim() == 3:
query = decoder_state
elif decoder_state.dim() == 2:
query = decoder_state.unsqueeze(1)
else:
raise ValueError("decoder state must be either 2 or 3 dimensional")
query = query.transpose(0, 1)
value = key = source_hids
src_len_mask = None
if src_lengths is not None and self.use_src_length_mask:
# [batch_size, 1, seq_len]
src_len_mask_int = attention_utils.create_src_lengths_mask(
batch_size=batch_size, src_lengths=src_lengths
)
src_len_mask = src_len_mask_int != 1
attn, attn_weights = self._fair_attn.forward(
query, key, value, key_padding_mask=src_len_mask, need_weights=True
)
# attn.shape = T X bsz X embed_dim
# attn_weights.shape = bsz X T X src_len
attn_weights = attn_weights.transpose(0, 2)
# attn_weights.shape = src_len X T X bsz
if squeeze:
attn = attn.squeeze(0)
# attn.shape = squeeze(T) X bsz X embed_dim
attn_weights = attn_weights.squeeze(1)
# attn_weights.shape = src_len X squeeze(T) X bsz
return attn, attn_weights
return attn, attn_weights
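
# Minimal usage sketch (added for illustration, not part of the original
# module); the tensor sizes are arbitrary and it assumes torch plus the
# fairseq/pytorch_translate dependencies are importable.
if __name__ == "__main__":
    import torch

    attn = MultiheadAttention(16, 16, nheads=4)
    decoder_state = torch.rand(2, 16)       # bsz x d_model
    source_hids = torch.rand(7, 2, 16)      # srclen x bsz x d_model
    src_lengths = torch.tensor([7, 5])      # bsz
    out, weights = attn.forward(decoder_state, source_hids, src_lengths)
    print(out.shape, weights.shape)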
| [((12, 1, 12, 32), 'pytorch_translate.attention.register_attention', 'register_attention', ({(12, 20, 12, 31): '"""multihead"""'}, {}), "('multihead')", False, 'from pytorch_translate.attention import BaseAttention, attention_utils, register_attention\n'), ((65, 26, 65, 76), 'fairseq.modules.multihead_attention.MultiheadAttention', 'fair_multihead.MultiheadAttention', ({(65, 60, 65, 67): 'd_model', (65, 69, 65, 75): 'nheads'}, {}), '(d_model, nheads)', True, 'from fairseq.modules import multihead_attention as fair_multihead\n'), ((105, 31, 107, 13), 'pytorch_translate.attention.attention_utils.create_src_lengths_mask', 'attention_utils.create_src_lengths_mask', (), '', False, 'from pytorch_translate.attention import BaseAttention, attention_utils, register_attention\n')] |
RobertD502/home-assistant-lavviebot | custom_components/purrsong/__init__.py | 5c69f474786f043773cba42b7806fb77d4f89672 | """Support for Purrsong LavvieBot S"""
import asyncio
import logging
import voluptuous as vol
from lavviebot import LavvieBotApi
import homeassistant.helpers.config_validation as cv
from homeassistant import config_entries
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME
)
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
"""Setup of the component"""
return True
async def async_setup_entry(hass, config_entry):
"""Set up Lavviebot integration from a config entry."""
username = config_entry.data.get(CONF_USERNAME)
password = config_entry.data.get(CONF_PASSWORD)
_LOGGER.info("Initializing the Lavviebot API")
lavviebot = await hass.async_add_executor_job(LavvieBotApi, username, password)
_LOGGER.info("Connected to API")
hass.data[DOMAIN] = lavviebot
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
return True
| [((18, 10, 18, 37), 'logging.getLogger', 'logging.getLogger', ({(18, 28, 18, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n')] |
matbocz/kurs-python-udemy | MathPainting_OOP/shapes.py | bbc53d0b2073b400aaad5ff908b3e1c09b815121 | class Rectangle:
"""A rectangle shape that can be drawn on a Canvas object"""
def __init__(self, x, y, width, height, color):
self.x = x
self.y = y
self.width = width
self.height = height
self.color = color
def draw(self, canvas):
"""Draws itself into the Canvas object"""
# Changes a slice of the array with new values
canvas.data[self.x: self.x + self.height, self.y: self.y + self.width] = self.color
class Square:
"""A square shape that can be drawn on a Canvas object"""
def __init__(self, x, y, side, color):
self.x = x
self.y = y
self.side = side
self.color = color
def draw(self, canvas):
"""Draws itself into the Canvas object"""
# Changes a slice of the array with new values
canvas.data[self.x: self.x + self.side, self.y: self.y + self.side] = self.color
| [] |
sddyates/mars | problems/Kelvin_Helmholtz/problem.py | a56735bd344b7337151fb419b1c832b0c702ea69 |
from mars import main_loop
import numpy as np
from mars.settings import *
class Problem:
"""
Synopsis
--------
User class for the Kelvin-Helmholtz instability
Args
----
None
Methods
-------
initialise
Set all variables in each cell to initialise the simulation.
internal_bc
Specify the internal boundary for the simulation.
TODO
----
None
"""
def __init__(self):
self.parameter = {
'Name':'Kelvin Helmholtz instability.',
'Dimensions':'2D',
'x1 min':-0.5,
'x1 max':0.5,
'x2 min':-0.5,
'x2 max':0.5,
'x3 min':-0.5,
'x3 max':0.5,
'resolution x1':256,
'resolution x2':256,
'resolution x3':0,
'cfl':0.3,
'initial dt':1.0e-5,
'max dt increase':1.5,
'initial t': 0.0,
'max time': 5.0,
'save frequency': 2.5e-2,
'output type': ['numpy'],
'output primitives': True,
'print to file':False,
'profiling': True,
'restart file':None,
'gamma':1.4,
'density unit':1.0,
'length unit':1.0,
'velocity unit':1.0,
'optimisation': 'numba',
'riemann':'hllc',
'reconstruction':'linear',
'limiter':'minmod',
'time stepping':'RK2',
'method':'hydro',
'lower x1 boundary':'reciprocal',
'upper x1 boundary':'reciprocal',
'lower x2 boundary':'reciprocal',
'upper x2 boundary':'reciprocal',
'lower x3 boundary':'reciprocal',
'upper x3 boundary':'reciprocal',
'internal boundary':False
}
def initialise(self, V, g, l):
if self.parameter['Dimensions'] == '2D':
Y, X = np.meshgrid(g.x1, g.x2, indexing='ij')
if self.parameter['Dimensions'] == '3D':
Z, Y, X = np.meshgrid(g.x1, g.x2, g.x3, indexing='ij')
yp = 0.25
dens_1 = 2.0
dens_2 = 1.0
pres = 2.0
vel_1 = 0.5
vel_2 = 0.0
amp = 0.001
vx1_per = (np.random.random(V.shape)*2.0 - 1)*amp
vx2_per = (np.random.random(V.shape)*2.0 - 1)*amp
region_1 = np.absolute(Y) < yp
region_2 = np.absolute(Y) > yp
V[rho, region_1] = dens_1
V[prs, region_1] = pres
V[vx1, region_1] = vel_1 + vx1_per[vx1, region_1]
V[vx2, region_1] = vel_2 + vx2_per[vx2, region_1]
V[rho, region_2] = dens_2
V[prs, region_2] = pres
V[vx1, region_2] = -vel_1 + vx1_per[vx1, region_2]
V[vx2, region_2] = vel_2 + vx2_per[vx2, region_2]
def internal_bc(self):
return None
if __name__ == "__main__":
main_loop(Problem())
| [((84, 19, 84, 57), 'numpy.meshgrid', 'np.meshgrid', (), '', True, 'import numpy as np\n'), ((87, 22, 87, 66), 'numpy.meshgrid', 'np.meshgrid', (), '', True, 'import numpy as np\n'), ((100, 19, 100, 33), 'numpy.absolute', 'np.absolute', ({(100, 31, 100, 32): 'Y'}, {}), '(Y)', True, 'import numpy as np\n'), ((101, 19, 101, 33), 'numpy.absolute', 'np.absolute', ({(101, 31, 101, 32): 'Y'}, {}), '(Y)', True, 'import numpy as np\n'), ((97, 19, 97, 44), 'numpy.random.random', 'np.random.random', ({(97, 36, 97, 43): 'V.shape'}, {}), '(V.shape)', True, 'import numpy as np\n'), ((98, 19, 98, 44), 'numpy.random.random', 'np.random.random', ({(98, 36, 98, 43): 'V.shape'}, {}), '(V.shape)', True, 'import numpy as np\n')] |
korkeatw/pythainlp | pythainlp/util/thai.py | 6fc7c3434d5e58c8e8e2bf13470445cbab0866bd | # -*- coding: utf-8 -*-
"""
Check if it is Thai text
"""
import string
_DEFAULT_IGNORE_CHARS = string.whitespace + string.digits + string.punctuation
def isthaichar(ch: str) -> bool:
"""
Check if a character is Thai
    (i.e., is this character a Thai character?)
:param str ch: input character
:return: True or False
"""
ch_val = ord(ch)
if ch_val >= 3584 and ch_val <= 3711:
return True
return False
def isthai(word: str, ignore_chars: str = ".") -> bool:
"""
    Check if every character in the word is Thai
    (i.e., whether the word consists solely of Thai characters)
:param str word: input text
:param str ignore_chars: characters to be ignored (i.e. will be considered as Thai)
:return: True or False
"""
if not ignore_chars:
ignore_chars = ""
for ch in word:
if ch not in ignore_chars and not isthaichar(ch):
return False
return True
def countthai(text: str, ignore_chars: str = _DEFAULT_IGNORE_CHARS) -> float:
"""
:param str text: input text
:return: float, proportion of characters in the text that is Thai character
"""
if not text or not isinstance(text, str):
return 0
if not ignore_chars:
ignore_chars = ""
num_thai = 0
num_ignore = 0
for ch in text:
if ch in ignore_chars:
num_ignore += 1
elif isthaichar(ch):
num_thai += 1
num_count = len(text) - num_ignore
return (num_thai / num_count) * 100
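# Minimal self-check (not part of the original module); the expected results
# follow directly from the definitions above.
if __name__ == "__main__":
    print(isthaichar("ก"))          # True: the code point falls in the Thai block
    print(isthai("สวัสดี"))          # True: every character is Thai
    print(countthai("สวัสดี abc"))   # percentage of Thai characters (whitespace ignored by default)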
| [] |
zharmedia386/Data-Science-Stuff | Numpy/tempCodeRunnerFile.py | 40183c329e3b30c582c545c260ca7916f29e2f09 |
# NOTE: this scratch fragment originally referenced names defined elsewhere in
# the session; placeholder values are assigned here (an assumption) so the
# snippet runs standalone.
b, c, d, e, f, g = 1, 2, 3, 4, 5, 6
print(b)
print(c)
print(d)
print(e)
print(f)
print(g) | [] |
KilroyWasHere-cs-j/savitzky-golay | Python/Filter.py | 2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f | import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog
x = []
y = []
def generate():
# Generate random data
base = np.linspace(0, 5, 11)
# base = np.random.randint(0, 10, 5)
outliers = np.random.randint(10, 20, 2)
data = np.concatenate((base, outliers))
np.random.shuffle(data)
return data
def fill_data():
# Build random data
return np.concatenate((np.array([0]), MadDog.find_outliers(generate()))), np.concatenate(
(np.array([0]), MadDog.find_outliers(generate()))) # np.sin(x) + np.cos(x) + np.random.random(100)
# np.linspace(0, 2*np.pi, 100)
def savitzky(x, y, poly_nom):
    # Note: the window length is derived from the data length; poly_nom is
    # currently unused because the polynomial order is hard-coded to 10.
    return savgol_filter(x, len(x) - 1, 10), savgol_filter(y, len(y) - 1, 10)
def map(x_filtered, y_filtered, x, y, title="title"):
    # Plot a 2-D histogram (heatmap) of the raw points, then of the filtered points
heatmap, xedges, yedges = np.histogram2d(x, y, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
heatmap, xedges, yedges = np.histogram2d(x_filtered, y_filtered, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
def show(x_filtered, y_filtered, x, y, title="Lorem ipsum"):
# Plotting
fig = plt.figure()
ax = fig.subplots()
plt.plot(x_filtered, y_filtered, 'red', marker="o")
plt.plot(x, y, 'green', marker="o")
plt.subplots_adjust(bottom=0.25)
plt.xlabel('x')
plt.ylabel('y')
plt.title(title)
plt.legend(["Filter", "Raw"])
plt.show()
# Generating the noisy signal
x, y = fill_data()
print(len(y))
# Savitzky-Golay filter
x_filtered, y_filtered = savitzky(x, y, 2)
print("X unfiltered>> ", x)
print("Y unfiltered>> ", y)
print("X filtered>> ", x_filtered)
print("Y filtered>> ", y_filtered)
show(x_filtered, y_filtered, x, y)
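# Optional (not in the original script): the map() helper above can also be
# used to compare raw vs. filtered point densities as 2-D histograms.
# map(x_filtered, y_filtered, x, y, title="Savitzky-Golay comparison")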
| [((12, 11, 12, 32), 'numpy.linspace', 'np.linspace', ({(12, 23, 12, 24): '0', (12, 26, 12, 27): '5', (12, 29, 12, 31): '11'}, {}), '(0, 5, 11)', True, 'import numpy as np\n'), ((14, 15, 14, 43), 'numpy.random.randint', 'np.random.randint', ({(14, 33, 14, 35): '10', (14, 37, 14, 39): '20', (14, 41, 14, 42): '2'}, {}), '(10, 20, 2)', True, 'import numpy as np\n'), ((15, 11, 15, 43), 'numpy.concatenate', 'np.concatenate', ({(15, 26, 15, 42): '(base, outliers)'}, {}), '((base, outliers))', True, 'import numpy as np\n'), ((16, 4, 16, 27), 'numpy.random.shuffle', 'np.random.shuffle', ({(16, 22, 16, 26): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((34, 30, 34, 59), 'numpy.histogram2d', 'np.histogram2d', (), '', True, 'import numpy as np\n'), ((37, 4, 37, 13), 'matplotlib.pyplot.clf', 'plt.clf', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((38, 4, 38, 56), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'import matplotlib.pyplot as plt\n'), ((39, 4, 39, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((41, 30, 41, 77), 'numpy.histogram2d', 'np.histogram2d', (), '', True, 'import numpy as np\n'), ((44, 4, 44, 13), 'matplotlib.pyplot.clf', 'plt.clf', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((45, 4, 45, 56), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'import matplotlib.pyplot as plt\n'), ((46, 4, 46, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((51, 10, 51, 22), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((53, 4, 53, 55), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((54, 4, 54, 39), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((55, 4, 55, 36), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', (), '', True, 'import matplotlib.pyplot as plt\n'), ((56, 4, 56, 19), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(56, 15, 56, 18): '"""x"""'}, {}), "('x')", True, 'import matplotlib.pyplot as plt\n'), ((57, 4, 57, 19), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(57, 15, 57, 18): '"""y"""'}, {}), "('y')", True, 'import matplotlib.pyplot as plt\n'), ((58, 4, 58, 20), 'matplotlib.pyplot.title', 'plt.title', ({(58, 14, 58, 19): 'title'}, {}), '(title)', True, 'import matplotlib.pyplot as plt\n'), ((59, 4, 59, 33), 'matplotlib.pyplot.legend', 'plt.legend', ({(59, 15, 59, 32): "['Filter', 'Raw']"}, {}), "(['Filter', 'Raw'])", True, 'import matplotlib.pyplot as plt\n'), ((60, 4, 60, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((22, 27, 22, 40), 'numpy.array', 'np.array', ({(22, 36, 22, 39): '[0]'}, {}), '([0])', True, 'import numpy as np\n'), ((23, 9, 23, 22), 'numpy.array', 'np.array', ({(23, 18, 23, 21): '[0]'}, {}), '([0])', True, 'import numpy as np\n')] |
dietrichc/streamline-ppc-reports | examples/adwords/v201406/advanced_operations/add_ad_customizer.py | 256f79246aba3c2cf8f792d87a066391a2f471e0 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with the customer and adds an ad that
uses the feed to populate dynamic data.
Tags: CustomerFeedService.mutate, FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
Tags: AdGroupAdService.mutate
"""
__author__ = ('[email protected] (Mark Saniscalchi)',
'[email protected] (Yufeng Guo)')
# Import appropriate classes from the client library.
from googleads import adwords
# See the Placeholder reference page for a list of all the placeholder types
# and fields:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
PLACEHOLDER_AD_CUSTOMIZER = '10'
PLACEHOLDER_FIELD_INTEGER = '1'
PLACEHOLDER_FIELD_FLOAT = '2'
PLACEHOLDER_FIELD_PRICE = '3'
PLACEHOLDER_FIELD_DATE = '4'
PLACEHOLDER_FIELD_STRING = '5'
ADGROUPS = [
'INSERT_ADGROUP_ID_HERE',
'INSERT_ADGROUP_ID_HERE'
]
FEEDNAME = 'INSERT_FEED_NAME_HERE'
def main(client, adgroups):
# Initialize appropriate services.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201406')
customer_feed_service = client.GetService(
'CustomerFeedService', version='v201406')
feed_item_service = client.GetService('FeedItemService', version='v201406')
feed_mapping_service = client.GetService(
'FeedMappingService', version='v201406')
feed_service = client.GetService('FeedService', version='v201406')
# First, create a customizer feed. One feed per account can be used for all
# ads.
customizer_feed = {
'name': FEEDNAME,
'attributes': [
{'type': 'STRING', 'name': 'Name'},
{'type': 'STRING', 'name': 'Price'},
{'type': 'DATE_TIME', 'name': 'Date'}
]
}
feed_service_operation = {
'operator': 'ADD',
'operand': customizer_feed
}
response = feed_service.mutate([feed_service_operation])
if response and 'value' in response:
feed = response['value'][0]
feed_data = {
'feedId': feed['id'],
'nameId': feed['attributes'][0]['id'],
'priceId': feed['attributes'][1]['id'],
'dateId': feed['attributes'][2]['id']
}
print ('Feed with name \'%s\' and ID %s was added with:'
'\tName attribute ID %s and price attribute ID %s and date attribute'
'ID %s') % (feed['name'], feed['id'], feed_data['nameId'],
feed_data['priceId'], feed_data['dateId'])
else:
raise Exception('No feeds were added')
# Creating feed mapping to map the fields with customizer IDs.
feed_mapping = {
'placeholderType': PLACEHOLDER_AD_CUSTOMIZER,
'feedId': feed_data['feedId'],
'attributeFieldMappings': [
{
'feedAttributeId': feed_data['nameId'],
'fieldId': PLACEHOLDER_FIELD_STRING
},
{
'feedAttributeId': feed_data['priceId'],
'fieldId': PLACEHOLDER_FIELD_PRICE
},
{
'feedAttributeId': feed_data['dateId'],
'fieldId': PLACEHOLDER_FIELD_DATE
}
]
}
feed_mapping_operation = {
'operator': 'ADD',
'operand': feed_mapping
}
response = feed_mapping_service.mutate([feed_mapping_operation])
if response and 'value' in response:
feed_mapping = response['value'][0]
print ('Feed mapping with ID %s and placeholder type %s was saved for feed'
' with ID %s.') % (feed_mapping['feedMappingId'],
feed_mapping['placeholderType'],
feed_mapping['feedId'])
else:
raise Exception('No feed mappings were added.')
# Now adding feed items -- the values we'd like to place.
items_data = [
{
'name': 'Mars',
'price': '$1234.56',
'date': '20140601 000000',
'adGroupId': adgroups[0]
},
{
'name': 'Venus',
'price': '$1450.00',
'date': '20140615 120000',
'adGroupId': adgroups[1]
}
]
feed_items = [{'feedId': feed_data['feedId'],
'adGroupTargeting': {
'TargetingAdGroupId': item['adGroupId']
},
'attributeValues': [
{
'feedAttributeId': feed_data['nameId'],
'stringValue': item['name']
},
{
'feedAttributeId': feed_data['priceId'],
'stringValue': item['price']
},
{
'feedAttributeId': feed_data['dateId'],
'stringValue': item['date']
}
]} for item in items_data]
feed_item_operations = [{
'operator': 'ADD',
'operand': feed_item
} for feed_item in feed_items]
response = feed_item_service.mutate(feed_item_operations)
if response and 'value' in response:
for feed_item in response['value']:
print 'Feed item with ID %s was added.' % feed_item['feedItemId']
else:
raise Exception('No feed items were added.')
# Finally, creating a customer (account-level) feed with a matching function
# that determines when to use this feed. For this case we use the "IDENTITY"
# matching function that is always 'true' just to associate this feed with
# the customer. The targeting is done within the feed items using the
# :campaign_targeting, :ad_group_targeting, or :keyword_targeting attributes.
matching_function = {
'operator': 'IDENTITY',
'lhsOperand': [
{
'xsi_type': 'ConstantOperand',
'type': 'BOOLEAN',
'booleanValue': 'true'
}
]
}
customer_feed = {
'feedId': feed_data['feedId'],
'matchingFunction': matching_function,
'placeholderTypes': [PLACEHOLDER_AD_CUSTOMIZER]
}
customer_feed_operation = {
'operator': 'ADD',
'operand': customer_feed
}
response = customer_feed_service.mutate([customer_feed_operation])
if response and 'value' in response:
feed = response['value'][0]
print 'Customer feed with ID %s was added.' % feed['feedId']
else:
raise Exception('No customer feeds were added.')
# All set! We can now create ads with customizations.
text_ad = {
'xsi_type': 'TextAd',
'headline': 'Luxury Cruise to {=%s.Name}' % FEEDNAME,
'description1': 'Only {=%s.Price}' % FEEDNAME,
'description2': 'Offer ends in {=countdown(%s.Date)}!' % FEEDNAME,
'url': 'http://www.example.com',
'displayUrl': 'www.example.com'
}
# We add the same ad to both ad groups. When they serve, they will show
# different values, since they match different feed items.
operations = [{
'operator': 'ADD',
'operand': {
'adGroupId': adgroup,
'ad': text_ad
}
} for adgroup in adgroups]
print operations
response = ad_group_ad_service.mutate(operations)
print '===ad group ad service==='
print response
if response and 'value' in response:
for ad in response['value']:
print ('\tCreated an ad with ID \'%s\', type \'%s\', and status \'%s\'.'
% (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
raise Exception('No ads were added.')
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUPS)
| [] |
azachar/pyminhash | tests/test_minhash.py | 8a595fb25fe7172ea31d604fe8a40b8c11f1b8af | import pytest
from pyminhash import MinHash
from pyminhash.datasets import load_data
def test__sparse_vector():
df = load_data()
myMinHasher = MinHash(10)
res = myMinHasher._sparse_vectorize(df, 'name')
assert res.columns.tolist() == ['name', 'sparse_vector']
assert res['sparse_vector'].dtype == 'object'
def test__create_hashing_parameters():
n_hashes = 10
myMinHasher = MinHash(n_hash_tables=n_hashes)
res = myMinHasher._create_hashing_parameters()
assert len(res) == n_hashes
assert res.dtype == 'int64'
assert min(res) >= 0
assert min(res) <= myMinHasher.max_token_value
def test__create_minhash():
n_hashes = 10
myMinHasher = MinHash(n_hash_tables=n_hashes)
doc = [59, 65, 66, 67, 118, 150, 266]
res = myMinHasher._create_minhash(doc)
assert len(res) == n_hashes
def test__create_minhash_signatures():
df = load_data()
myMinHasher = MinHash(3)
df = myMinHasher._sparse_vectorize(df, 'name')
df = myMinHasher._create_minhash_signatures(df)
for col in ['hash_0', 'hash_1', 'hash_2']:
assert col in df.columns
assert df[col].dtype == 'int64'
def test_fit_predict():
df = load_data()
myMinHasher = MinHash(10)
res = myMinHasher.fit_predict(df, 'name')
assert res.columns.tolist() == ['row_number_1', 'row_number_2', 'name_1', 'name_2', 'jaccard_sim']
assert res['jaccard_sim'].dtype == 'float'
def test_fit_predict_accuracy():
def jaccard(x, y):
x_tokens = set(x.split())
y_tokens = set(y.split())
return len(x_tokens.intersection(y_tokens)) / len(x_tokens.union(y_tokens))
df = load_data()
myMinHasher = MinHash(1000)
res = myMinHasher.fit_predict(df, 'name')
assert len(res) == 1727
res['jaccard_real'] = res.apply(lambda row: jaccard(row['name_1'], row['name_2']), axis=1)
res['diff'] = res['jaccard_real'] - res['jaccard_sim']
assert abs(res['diff'].mean()) < 0.02
assert res['diff'].std() < 0.1
| [((8, 9, 8, 20), 'pyminhash.datasets.load_data', 'load_data', ({}, {}), '()', False, 'from pyminhash.datasets import load_data\n'), ((9, 18, 9, 29), 'pyminhash.MinHash', 'MinHash', ({(9, 26, 9, 28): '10'}, {}), '(10)', False, 'from pyminhash import MinHash\n'), ((17, 18, 17, 49), 'pyminhash.MinHash', 'MinHash', (), '', False, 'from pyminhash import MinHash\n'), ((27, 18, 27, 49), 'pyminhash.MinHash', 'MinHash', (), '', False, 'from pyminhash import MinHash\n'), ((34, 9, 34, 20), 'pyminhash.datasets.load_data', 'load_data', ({}, {}), '()', False, 'from pyminhash.datasets import load_data\n'), ((35, 18, 35, 28), 'pyminhash.MinHash', 'MinHash', ({(35, 26, 35, 27): '3'}, {}), '(3)', False, 'from pyminhash import MinHash\n'), ((44, 9, 44, 20), 'pyminhash.datasets.load_data', 'load_data', ({}, {}), '()', False, 'from pyminhash.datasets import load_data\n'), ((45, 18, 45, 29), 'pyminhash.MinHash', 'MinHash', ({(45, 26, 45, 28): '10'}, {}), '(10)', False, 'from pyminhash import MinHash\n'), ((57, 9, 57, 20), 'pyminhash.datasets.load_data', 'load_data', ({}, {}), '()', False, 'from pyminhash.datasets import load_data\n'), ((58, 18, 58, 31), 'pyminhash.MinHash', 'MinHash', ({(58, 26, 58, 30): '1000'}, {}), '(1000)', False, 'from pyminhash import MinHash\n')] |
danylo-dudok/youtube-rss | settings.py | c4478605274cdeac33f909d7fcb7d265898e80bc | from datetime import datetime, timedelta
from typing import final
from tools import localize_time
RSS_URL_PREFIX: final = 'https://www.youtube.com/feeds/videos.xml?channel_id={0}'
LOCATION_ARGUMENT_PREFIX: final = '--location='
CHANNEL_ARGUMENT_PREFIX: final = '--channels='
LAST_CHECK_ARGUMENT_PREFIX: final = '--last-check='
TWO_WEEKS_IN_DAYS: final = 14
DEFAULT_LAST_CHECK: final = localize_time(datetime.now() - timedelta(days=TWO_WEEKS_IN_DAYS))
EMPTY: final = ''
CHANNEL_POSTS_LIMIT: final = 20
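# Example (illustrative only): RSS_URL_PREFIX.format('SOME_CHANNEL_ID') yields
# the RSS feed URL for that channel, presumably consumed elsewhere in this project.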
| [((11, 42, 11, 56), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((11, 59, 11, 92), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n')] |
DX-MON/OpenPICle | openpicle/caravel.py | c036333f807b1b4959af22bde8c4cac553ef162f | # SPDX-License-Identifier: BSD-3-Clause
from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter
__all__ = (
'PIC16Caravel',
)
class PIC16Caravel(Elaboratable):
def elaborate(self, platform):
from .pic16 import PIC16
from .soc.busses.qspi import QSPIBus
m = Module()
reset = Signal()
busy_n = Signal(reset = 1)
m.submodules.qspiFlash = qspiFlash = QSPIBus(resourceName = ('spi_flash_4x', 0))
m.submodules.pic = pic = ResetInserter(reset)(EnableInserter(busy_n)(PIC16()))
run = platform.request('run', 0)
pBus = platform.request('p_bus', 0)
addr = pBus.addr.o
dataIn = pBus.data.i
dataOut = pBus.data.o
dataDir = pBus.data.oe
read = pBus.read
write = pBus.write
with m.If(qspiFlash.complete | reset):
m.d.sync += busy_n.eq(1)
with m.Elif(pic.iBus.read):
m.d.sync += busy_n.eq(0)
m.d.comb += [
reset.eq(~qspiFlash.ready),
run.o.eq(qspiFlash.ready & busy_n),
qspiFlash.address[0].eq(0),
qspiFlash.address[1:].eq(pic.iBus.address),
pic.iBus.data.eq(qspiFlash.data),
qspiFlash.read.eq(pic.iBus.read),
addr.eq(pic.pBus.address),
read.eq(pic.pBus.read),
pic.pBus.readData.eq(dataIn),
write.eq(pic.pBus.write),
dataOut.eq(pic.pBus.writeData),
dataDir.eq(pic.pBus.write),
]
return m
def get_ports(self):
return []
| [((12, 6, 12, 14), 'amaranth.Module', 'Module', ({}, {}), '()', False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n'), ((13, 10, 13, 18), 'amaranth.Signal', 'Signal', ({}, {}), '()', False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n'), ((14, 11, 14, 28), 'amaranth.Signal', 'Signal', (), '', False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n'), ((17, 27, 17, 47), 'amaranth.ResetInserter', 'ResetInserter', ({(17, 41, 17, 46): 'reset'}, {}), '(reset)', False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n'), ((17, 48, 17, 70), 'amaranth.EnableInserter', 'EnableInserter', ({(17, 63, 17, 69): 'busy_n'}, {}), '(busy_n)', False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n')] |
est73/raid-shack | cogs/stats.py | 727b79a50a0ff5a5fc1cdfe03d51ba6703343b2e | from discord.ext import commands
import discord
class Stats(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.has_permissions(manage_channels=True)
async def stats(self, ctx):
members = await ctx.guild.fetch_members(limit=None).flatten()
member_count = 0
member_role_count = 0
instinct_count = 0
mystic_count = 0
valor_count = 0
ign_count = 0
tc_count = 0
level_count = 0
country_count = 0
profile_count = 0
for member in members:
if not member.bot:
member_count += 1
for role in member.roles:
if role.name == "Member":
member_role_count += 1
if role.name == "instinct":
instinct_count += 1
if role.name == "mystic":
mystic_count += 1
if role.name == "valor":
valor_count += 1
if role.name == "ign":
ign_count += 1
if role.name == "tc":
tc_count += 1
if role.name == "level":
level_count += 1
if role.name == "country":
country_count += 1
if role.name == "profile":
profile_count += 1
values = [f'Members: {member_count}',
f'Members Role: {member_role_count}',
f'Members on Team Instinct: {instinct_count}',
f'Members on Team Mystic: {mystic_count}',
f'Members on Team Valor: {valor_count}',
f'Members with IGN set: {ign_count}',
f'Members with TC set: {tc_count}',
f'Members with level set: {level_count}',
f'Members with country set: {country_count}',
f'Members with completed Nexus Profiles: {profile_count}']
embed = discord.Embed(color=discord.Color.green())
embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
embed.add_field(name='Server Stats:', value='\n'.join(values), inline=False)
await ctx.send(embed=embed)
@stats.error
async def permission_error(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send("Sorry, you can't run this command")
else:
raise error
def setup(bot):
bot.add_cog(Stats(bot))
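# Loading this cog from a bot entry point is assumed to follow the usual
# discord.py extension pattern (illustrative, not part of this file):
#   bot.load_extension("cogs.stats")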
| [((10, 5, 10, 23), 'discord.ext.commands.command', 'commands.command', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((11, 5, 11, 51), 'discord.ext.commands.has_permissions', 'commands.has_permissions', (), '', False, 'from discord.ext import commands\n'), ((58, 36, 58, 57), 'discord.Color.green', 'discord.Color.green', ({}, {}), '()', False, 'import discord\n')] |
WillChilds-Klein/mistress-mapreduce | tests/bucket/test_bucket.py | c991a502545bd0d3ec4f914cdc63faf6a40e77ae | from mrs.bucket import WriteBucket
from mrs import BinWriter, HexWriter
def test_writebucket():
b = WriteBucket(0, 0)
b.addpair((4, 'test'))
b.collect([(3, 'a'), (1, 'This'), (2, 'is')])
values = ' '.join(value for key, value in b)
assert values == 'test a This is'
b.sort()
values = ' '.join(value for key, value in b)
assert values == 'This is a test'
def test_write_only():
b = WriteBucket(0, 0)
b.addpair((4, 'test'), write_only=True)
b.collect([(3, 'a'), (1, 'This'), (2, 'is')], write_only=True)
values = ' '.join(value for key, value in b)
assert values == ''
readonly_copy = b.readonly_copy()
assert readonly_copy.url is None
def test_writing(tmpdir):
b = WriteBucket(2, 4, dir=tmpdir.strpath, format=BinWriter)
prefix = b.prefix()
assert prefix == 'source_2_split_4_'
listdir = tmpdir.listdir()
assert listdir == []
b.addpair((1, 2))
filename = prefix + '.mrsb'
path = tmpdir.join(filename).strpath
listdir = tmpdir.listdir()
assert listdir == [path]
readonly_copy = b.readonly_copy()
assert readonly_copy.url == path
def test_roundtrip(tmpdir):
b = WriteBucket(2, 4, dir=tmpdir.strpath, format=BinWriter)
prefix = b.prefix()
assert prefix == 'source_2_split_4_'
listdir = tmpdir.listdir()
assert listdir == []
b.addpair((4, 'test'))
b.collect([(3, 'a'), (1, 'This'), (2, 'is')])
values = ' '.join(value for key, value in b)
assert values == 'test a This is'
b.close_writer(do_sync=False)
filename = prefix + '.mrsb'
path = tmpdir.join(filename).strpath
listdir = tmpdir.listdir()
assert listdir == [path]
readonly_copy = b.readonly_copy()
assert readonly_copy.url == path
values = ' '.join(value for key, value in readonly_copy)
assert values == 'test a This is'
values = ' '.join(value for key, value in readonly_copy.stream())
assert values == 'test a This is'
b.clean()
listdir = tmpdir.listdir()
assert listdir == []
def test_roundtrip_write_only(tmpdir):
b = WriteBucket(7, 1, dir=tmpdir.strpath, format=HexWriter)
prefix = b.prefix()
assert prefix == 'source_7_split_1_'
listdir = tmpdir.listdir()
assert listdir == []
b.addpair((4, 'test'), write_only=True)
b.collect([(3, 'a'), (1, 'This'), (2, 'is')], write_only=True)
values = ' '.join(value for key, value in b)
assert values == ''
b.close_writer(do_sync=False)
filename = prefix + '.mrsx'
path = tmpdir.join(filename).strpath
listdir = tmpdir.listdir()
assert listdir == [path]
readonly_copy = b.readonly_copy()
assert readonly_copy.url == path
values = ' '.join(value for key, value in readonly_copy)
assert values == ''
values = ' '.join(value for key, value in readonly_copy.stream())
assert values == 'test a This is'
b.clean()
listdir = tmpdir.listdir()
assert listdir == []
# vim: et sw=4 sts=4
| [((5, 8, 5, 25), 'mrs.bucket.WriteBucket', 'WriteBucket', ({(5, 20, 5, 21): '0', (5, 23, 5, 24): '0'}, {}), '(0, 0)', False, 'from mrs.bucket import WriteBucket\n'), ((17, 8, 17, 25), 'mrs.bucket.WriteBucket', 'WriteBucket', ({(17, 20, 17, 21): '0', (17, 23, 17, 24): '0'}, {}), '(0, 0)', False, 'from mrs.bucket import WriteBucket\n'), ((28, 8, 28, 63), 'mrs.bucket.WriteBucket', 'WriteBucket', (), '', False, 'from mrs.bucket import WriteBucket\n'), ((46, 8, 46, 63), 'mrs.bucket.WriteBucket', 'WriteBucket', (), '', False, 'from mrs.bucket import WriteBucket\n'), ((79, 8, 79, 63), 'mrs.bucket.WriteBucket', 'WriteBucket', (), '', False, 'from mrs.bucket import WriteBucket\n')] |
HARPLab/inquire | inquire/agents/dempref.py | fa74eb10e5391a0f226753668a31527c68fc6962 | """
An agent which uses demonstrations and preferences.
Code adapted from Learning Reward Functions
by Integrating Human Demonstrations and Preferences.
"""
import itertools
import os
import time
from pathlib import Path
from typing import Dict, List
import arviz as az
from inquire.agents.agent import Agent
from inquire.environments.environment import Environment
from inquire.interactions.feedback import Query, Trajectory
from inquire.interactions.modalities import Preference
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import pymc3.distributions.transforms as tr
import scipy.optimize as opt
import theano.tensor as tt
class DemPref(Agent):
"""A preference-querying agent seeded with demonstrations.
Note: We instantiate the agent according to arguments corresponding to
    what the original paper's codebase designates as its main experiment.
"""
def __init__(
self,
weight_sample_count: int,
trajectory_sample_count: int,
trajectory_length: int,
interaction_types: list = [],
w_dim: int = 4,
which_param_csv: int = 0,
visualize: bool = False,
):
"""Initialize the agent.
Note we needn't maintain a domain's start state; that's handled in
inquire/tests/evaluation.py and the respective domain.
"""
self._weight_sample_count = weight_sample_count
self._trajectory_sample_count = trajectory_sample_count
self._trajectory_length = trajectory_length
self._interaction_types = interaction_types
self._visualize = visualize
"""
Get the pre-defined agent parameters
"""
self._dempref_agent_parameters = self.read_param_csv(which_param_csv)
"""
        Instance attributes from the original codebase's 'runner.py' object. Note
        that some variable names are modified to be consistent with the Inquire
parlance.
"""
self.domain_name = self._dempref_agent_parameters["domain"][0]
self.teacher_type = self._dempref_agent_parameters["teacher_type"][0]
self.n_demos = self._dempref_agent_parameters["n_demos"][0]
self.gen_demos = self._dempref_agent_parameters["gen_demos"][0]
self.opt_iter_count = self._dempref_agent_parameters["opt_iter_count"][
0
]
self.trim_start = self._dempref_agent_parameters["trim_start"][0]
self.query_option_count = self._dempref_agent_parameters[
"query_option_count"
][0]
self.update_func = self._dempref_agent_parameters["update_func"][0]
self.trajectory_length = self._dempref_agent_parameters[
"trajectory_length"
][0]
self.incl_prev_query = self._dempref_agent_parameters[
"incl_prev_query"
][0]
self.gen_scenario = self._dempref_agent_parameters["gen_scenario"][0]
self.n_pref_iters = self._dempref_agent_parameters["n_pref_iters"][0]
self.epsilon = self._dempref_agent_parameters["epsilon"][0]
"""
Instantiate the DemPref-specific sampler and query generator:
"""
self._sampler = None
self._w_samples = None
self._query_generator = None
self._first_q_session = True
self._q_session_index = 0
self._query_index = 0
self._w_dim = w_dim
assert (
self.update_func == "pick_best"
or self.update_func == "approx"
or self.update_func == "rank"
), ("Update" " function must be one of the provided options")
if self.incl_prev_query and self.teacher_type == "term":
assert (
self.n_demos > 0
), "Cannot include previous query if no demonstration is provided"
self.n_samples_summ = self._dempref_agent_parameters["n_samples_summ"][
0
]
self.n_samples_exp = self._dempref_agent_parameters["n_samples_exp"][0]
self.beta_demo = self._dempref_agent_parameters["beta_demo"][0]
self.beta_pref = self._dempref_agent_parameters["beta_pref"][0]
self.beta_teacher = self._dempref_agent_parameters["beta_teacher"][0]
"""If we want to save data as they did in DemPref:"""
self.first_q_session = True
self.q_session_index = 0
self.query_index = 0
self.config = [
self.teacher_type,
self.n_demos,
self.trim_start,
self.query_option_count,
self.update_func,
self.trajectory_length,
self.incl_prev_query,
self.gen_scenario,
self.n_pref_iters,
self.epsilon,
self.n_samples_summ,
self.n_samples_exp,
self.beta_demo,
self.beta_pref,
self.beta_teacher,
]
self.df = pd.DataFrame(columns=["run #", "pref_iter", "type", "value"])
def initialize_weights(self, domain: Environment) -> np.ndarray:
"""Randomly initialize weights for gradient descent."""
self.reset()
return self.w_samples
def reset(self) -> None:
"""Prepare for new query session."""
if self._sampler is not None:
self._sampler.clear_pref()
self._sampler = self.DemPrefSampler(
query_option_count=self.query_option_count,
dim_features=self._w_dim,
update_func=self.update_func,
beta_demo=self.beta_demo,
beta_pref=self.beta_pref,
visualize=self._visualize,
)
self.w_samples = self._sampler.sample(N=self.n_samples_summ)
"""If we want to save data as they did in DemPref:"""
mean_w = np.mean(self.w_samples, axis=0)
mean_w = mean_w / np.linalg.norm(mean_w)
var_w = np.var(self.w_samples, axis=0)
# Make sure to properly index data:
if self.first_q_session:
self.first_q_session = False
else:
self.q_session_index += 1
data = [
[self.q_session_index, 0, "mean", mean_w],
[self.q_session_index, 0, "var", var_w],
]
self.df = self.df.append(
pd.DataFrame(
data, columns=["run #", "pref_iter", "type", "value"]
),
ignore_index=True,
)
def generate_query(
self,
domain: Environment,
query_state: int,
curr_w: np.ndarray,
verbose: bool = False,
) -> list:
"""Generate query using approximate gradients.
Code adapted from DemPref's ApproxQueryGenerator.
"""
if self._query_generator is None:
self._query_generator = self.DemPrefQueryGenerator(
dom=domain,
num_queries=self.query_option_count,
trajectory_length=self.trajectory_length,
num_expectation_samples=self.n_samples_exp,
include_previous_query=self.incl_prev_query,
generate_scenario=self.gen_scenario,
update_func=self.update_func,
beta_pref=self.beta_pref,
)
if self.incl_prev_query:
if len(self.demos) > 0:
self.random_scenario_index = np.random.randint(len(self.demos))
else:
self.random_scenario_index = 0
last_query_choice = self.all_query_choices[
self.random_scenario_index
]
# Generate query_options while ensuring that features of query_options
# are epsilon apart:
query_diff = 0
print("Generating query_options")
while query_diff <= self.epsilon:
if self.incl_prev_query:
if last_query_choice.null:
query_options = self._query_generator.generate_query_options(
self.w_samples, blank_traj=True
)
else:
query_options = self._query_generator.generate_query_options(
self.w_samples, last_query_choice
)
else:
query_options = self._query_generator.generate_query_options(
self.w_samples
)
query_diffs = []
for m in range(len(query_options)):
for n in range(m):
query_diffs.append(
np.linalg.norm(
domain.features_from_trajectory(
query_options[m].trajectory
)
- domain.features_from_trajectory(
query_options[n].trajectory
)
)
)
query_diff = max(query_diffs)
query = Query(
query_type=Preference,
task=None,
start_state=query_state,
trajectories=query_options,
)
return query
def update_weights(
self, current_weights: np.ndarray, domain: Environment, feedback: list
) -> np.ndarray:
"""Update the model's learned weights.
::inputs:
::current_weights: Irrelevant for DemPref; useful to other agents
::domain: The task's environment
::feedback: A list of the human feedback received to this point.
                DemPref utilizes only the most recent choice.
"""
if feedback == []:
# No feedback yet received
return self.w_samples
else:
# Use the most recent Choice in feedback:
query_options = feedback[-1].choice.options
choice = feedback[-1].choice.selection
choice_index = query_options.index(choice)
if self.incl_prev_query:
self.all_query_choices[self.random_scenario_index] = choice
# Create dictionary map from rankings to query-option features;
# load into sampler:
features = [
domain.features_from_trajectory(x.trajectory)
for x in query_options
]
phi = {k: features[k] for k in range(len(query_options))}
self._sampler.load_prefs(phi, choice_index)
self.w_samples = self._sampler.sample(N=self.n_samples_summ)
# Return the new weights from the samples:
mean_w = np.mean(self.w_samples, axis=0)
mean_w = mean_w / np.linalg.norm(mean_w)
return np.array(mean_w, copy=True).reshape(1, -1)
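    # Sketch of how the methods above fit together in one query session
    # (hypothetical driver code; `domain`, `start_state`, `demos`, and `teacher`
    # stand in for Inquire's environment/teacher objects and are not defined here):
    #
    #   agent = DemPref(weight_sample_count=50000, trajectory_sample_count=50,
    #                   trajectory_length=10, interaction_types=[Preference])
    #   w = agent.initialize_weights(domain)
    #   agent.process_demonstrations(demos, domain)
    #   feedback = []
    #   for _ in range(agent.n_pref_iters):
    #       query = agent.generate_query(domain, start_state, w)
    #       feedback.append(teacher.respond(query))   # feedback items hold a Choice
    #       w = agent.update_weights(w, domain, feedback)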
def read_param_csv(self, which_csv: int = 0) -> dict:
"""Read an agent-parameterization .csv.
::inputs:
            :which_csv: A time-descending .csv file index,
                        e.g. if which_csv = 0, use the
                        dempref_agent.csv most recently created.
"""
data_path = Path.cwd() / Path("../inquire/agents/")
# Sort the .csvs in descending order by time of creation:
all_files = np.array(list(Path.iterdir(data_path)))
all_csvs = all_files[
np.argwhere([f.suffix == ".csv" for f in all_files])
]
all_csvs = np.array([str(f[0]).strip() for f in all_csvs])
sorted_csvs = sorted(all_csvs, key=os.path.getmtime)
sorted_csvs = [Path(c) for c in sorted_csvs]
# Select the indicated .csv and convert it to a dictionary:
chosen_csv = sorted_csvs[-which_csv]
df = pd.read_csv(chosen_csv)
params_dict = df.to_dict()
return params_dict
def process_demonstrations(
self, trajectories: list, domain: Environment
) -> None:
"""Generate demonstrations to seed the querying process."""
self.demos = trajectories
phi_demos = [
domain.features_from_trajectory(x.trajectory) for x in self.demos
]
self._sampler.load_demo(np.array(phi_demos))
self.cleaned_demos = self.demos
if self.incl_prev_query:
self.all_query_choices = [d for d in self.cleaned_demos]
class DemPrefSampler:
"""Sample trajectories for querying.
Code adapted from original DemPref agent.
"""
def __init__(
self,
query_option_count: int,
dim_features: int,
update_func: str = "approx",
beta_demo: float = 0.1,
beta_pref: float = 1.0,
visualize: bool = False,
):
"""
Initialize the sampler.
:param query_option_count: Number of queries.
:param dim_features: Dimension of feature vectors.
:param update_func: options are "rank", "pick_best", and
"approx". To use "approx", query_option_count
must be 2; will throw an assertion error
otherwise
:param beta_demo: parameter measuring irrationality of teacher in
providing demonstrations
:param beta_pref: parameter measuring irrationality of teacher in
selecting preferences
"""
self.query_option_count = query_option_count
self.dim_features = dim_features
self.update_func = update_func
self.beta_demo = beta_demo
self.beta_pref = beta_pref
self._visualize = visualize
if self.update_func == "approx":
assert (
self.query_option_count == 2
), "Cannot use approximation to update function if query_option_count > 2"
elif not (
self.update_func == "rank" or self.update_func == "pick_best"
):
raise Exception(
update_func + " is not a valid update function."
)
# feature vectors from demonstrated trajectories
self.phi_demos = np.zeros((1, self.dim_features))
# a list of np.arrays containing feature difference vectors and
# which encode the ranking from the preference
# queries
self.phi_prefs = []
def load_demo(self, phi_demos: np.ndarray):
"""
Load the demonstrations into the Sampler.
:param demos: a Numpy array containing feature vectors for each
demonstration; has dimension
n_dem -by- self.dim_features
"""
self.phi_demos = phi_demos
def load_prefs(self, phi: Dict, rank):
"""
Load the results of a preference query into the Sampler.
:param phi: a dictionary mapping rankings
(0,...,query_option_count-1) to feature vectors
"""
result = []
if self.update_func == "rank":
result = [None] * len(rank)
for i in range(len(rank)):
result[i] = phi[rank[i]]
elif self.update_func == "approx":
result = phi[rank] - phi[1 - rank]
elif self.update_func == "pick_best":
result, tmp = [phi[rank] - phi[rank]], []
for key in sorted(phi.keys()):
if key != rank:
tmp.append(phi[key] - phi[rank])
result.extend(tmp)
self.phi_prefs.append(np.array(result))
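            # Worked example (hypothetical numbers): with update_func == "approx",
            # phi = {0: [1.0, 0.0], 1: [0.0, 1.0]} and rank == 0, the stored entry
            # is phi[0] - phi[1] == [1.0, -1.0]. "pick_best" stores one difference
            # per option relative to the chosen one (including a zero row), and
            # "rank" stores the raw feature vectors in ranked order.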
def clear_pref(self):
"""Clear all preference information from the sampler."""
self.phi_prefs = []
def sample(self, N: int, T: int = 1, burn: int = 1000) -> np.ndarray:
"""Return N samples from the distribution.
The distribution is defined by applying update_func on the
demonstrations and preferences observed thus far.
:param N: number of w_samples to draw.
:param T: if greater than 1, all samples except each T^{th}
sample are discarded
:param burn: how many samples before the chain converges;
these initial samples are discarded
:return: list of w_samples drawn
"""
"""Define model for MCMC.
NOTE the DemPref codebase creates a sampler via PyMC3 version 3.5;
this codebase adapts their model to PyMC3 version 3.11.2.
We use the NUTS sampling algorithm (an extension of
            Hamiltonian Monte Carlo MCMC): https://arxiv.org/abs/1111.4246.
"""
# Define update function:
if self.update_func == "approx":
def update_function(distribution):
result = tt.sum(
[
-tt.nnet.relu(
-self.beta_pref
* tt.dot(self.phi_prefs[i], distribution)
)
for i in range(len(self.phi_prefs))
]
) + tt.sum(
self.beta_demo * tt.dot(self.phi_demos, distribution)
)
return result
elif self.update_func == "pick_best":
def update_function(distribution):
result = tt.sum(
[
-tt.log(
tt.sum(
tt.exp(
self.beta_pref
* tt.dot(
self.phi_prefs[i], distribution
)
)
)
)
for i in range(len(self.phi_prefs))
]
) + tt.sum(
self.beta_demo * tt.dot(self.phi_demos, distribution)
)
return result
elif self.update_func == "rank":
def update_function(distribution):
result = (
tt.sum( # sum across different queries
[
tt.sum( # sum across different terms in PL-update
-tt.log(
[
tt.sum( # sum down different feature-differences in a single term in PL-update
tt.exp(
self.beta_pref
* tt.dot(
self.phi_prefs[i][
j:, :
]
- self.phi_prefs[i][j],
distribution,
)
)
)
for j in range(
self.query_option_count
)
]
)
)
for i in range(len(self.phi_prefs))
]
)
+ tt.sum(
self.beta_demo
* tt.dot(self.phi_demos, distribution)
),
)
return result
self.update_function = update_function
while True:
test_value = np.random.uniform(
low=-1, high=1, size=self.dim_features
)
test_value = test_value / np.linalg.norm(test_value)
norm = (test_value ** 2).sum()
if norm <= 1:
break
# Get a sampling trace (and avoid Bad Initial Energy):
while True:
trace = self.get_trace(test_value)
if trace is not None:
break
if self._visualize:
az.plot_trace(trace)
plt.show()
input("Press enter to continue")
az.plot_energy(trace)
plt.show()
input("Press enter to continue")
az.plot_posterior(trace)
plt.show()
input("Press enter to continue")
all_samples = trace.sel(
draw=slice(burn, None)
).posterior.rv_x.values
all_samples = all_samples.reshape(
all_samples.shape[0] * all_samples.shape[1], -1
)
w_samples = np.array([r / np.linalg.norm(r) for r in all_samples])
return w_samples
def get_trace(self, test_val: np.ndarray) -> az.InferenceData:
"""Create an MCMC trace."""
# model accumulates the objects defined within the proceeding
# context:
model = pm.Model()
with model:
# Add random-variable x to model:
rv_x = pm.Uniform(
name="rv_x",
shape=self.dim_features,
lower=-1,
upper=1,
testval=test_val,
)
# Define the prior as the unit ball centered at 0:
def sphere(w):
"""Determine if w is part of the unit ball."""
w_sum = pm.math.sqr(w).sum()
result = tt.switch(
pm.math.gt(w_sum, 1.0),
-100,
# -np.inf,
self.update_function(w),
)
return result
try:
# Potential is a "potential term" defined as an "additional
# tensor...to be added to the model logp"(PyMC3 developer
# guide). In this instance, the potential is effectively
# the model's log-likelihood.
p = pm.Potential("sphere", sphere(rv_x))
trace = pm.sample(
10000,
tune=5000,
return_inferencedata=True,
init="adapt_diag",
)
# except:
except (
pm.SamplingError,
pm.parallel_sampling.ParallelSamplingError,
):
return None
return trace
class DemPrefQueryGenerator:
"""Generate queries.
Code adapted from original DemPref agent.
"""
def __init__(
self,
dom: Environment,
num_queries: int,
trajectory_length: int,
num_expectation_samples: int,
include_previous_query: bool,
generate_scenario: bool,
update_func: str,
beta_pref: float,
) -> None:
"""
Initialize the approx query generation.
Note: this class generates queries using approx gradients.
::original inputs:
:dom: the domain to generate queries on
:num_queries: number of queries to generate at each time step
:trajectory_length: the length of each query
:num_expectation_samples: number of w_samples to use in
approximating the objective
function
:include_previous_query: boolean for whether one of the
queries is the previously selected
query
:generate_scenario: boolean for whether we want to generate
the scenario -- i.e., other agents'
behavior
:update_func: the update_func used; the options are
"pick_best", "approx", and "rank"
:beta_pref: the rationality parameter for the teacher
selecting her query
::Inquire-specific inputs:
:start_state: The state from which a trajectory begins.
"""
assert (
num_queries >= 1
), "QueryGenerator.__init__: num_queries must be at least 1"
assert (
trajectory_length >= 1
), "QueryGenerator.__init__: trajectory_length must be at least 1"
assert (
num_expectation_samples >= 1
), "QueryGenerator.__init__: num_expectation_samples must be \
at least 1"
self.domain = dom
self.num_queries = num_queries
self.trajectory_length = trajectory_length
self.num_expectation_samples = num_expectation_samples
self.include_previous_query = include_previous_query
self.generate_scenario = (
generate_scenario # Currently must be False
)
assert (
self.generate_scenario is False
), "Cannot generate scenario when using approximate gradients"
self.update_func = update_func
self.beta_pref = beta_pref
self.num_new_queries = (
self.num_queries - 1
if self.include_previous_query
else self.num_queries
)
def generate_query_options(
self,
w_samples: np.ndarray,
last_query_choice: Trajectory = None,
blank_traj: bool = False,
) -> List[Trajectory]:
"""
Generate self.num_queries number of queries.
This function produces query options that (locally) maximize the
maximum volume removal objective.
:param w_samples: Samples of w
:param last_query_choice: The previously selected query. Only
required if self.incl_prev_query is
True
            :param blank_traj: True if last_query_choice is blank. (Only
True if not using Dempref but using incl_prev_)
:return: a list of trajectories (queries)
"""
start = time.perf_counter()
def func(controls: np.ndarray, *args) -> float:
"""Minimize via L_BFGS.
:param controls: an array, concatenated to contain the control
input for all queries
:param args: the first argument is the domain, and the second
is the samples that will be used to approximate
the objective function
:return: the value of the objective function for the given set
of controls
"""
domain = args[0]
w_samples = args[1]
controls = np.array(controls)
controls_set = [
controls[i * z : (i + 1) * z]
for i in range(self.num_new_queries)
]
features_each_q_option = np.zeros(
(domain.w_dim, self.num_new_queries)
)
for i, c in enumerate(controls_set):
features_each_q_option[
:, i
] = domain.features_from_trajectory(
c, controls_as_input=True
)
if self.include_previous_query and not blank_traj:
features_each_q_option = np.append(
features_each_q_option,
domain.features_from_trajectory(last_query_choice),
axis=1,
)
if self.update_func == "pick_best":
return -objective(features_each_q_option, w_samples)
elif self.update_func == "approx":
return -approx_objective(features_each_q_option, w_samples)
else:
return -rank_objective(features_each_q_option, w_samples)
def objective(features: List, w_samples: np.ndarray) -> float:
"""
Maximize the volume removal objective.
:param features: a list containing the feature values of each
query
:param w_samples: samples of w, used to approximate the
objective
:return: the value of the objective function, evaluated on the
given queries' features
"""
volumes_removed = []
for i in range(len(features)):
feature_diff = np.array(
[f - features[i] for f in features]
) # query_option_count x feature_size
weighted_feature_diff = (
np.sum(np.dot(feature_diff, w_samples.T), axis=1)
/ w_samples.shape[0]
) # query_option_count x 1 -- summed across w_samples
v_removed = 1.0 - 1.0 / np.sum(
np.exp(self.beta_pref * weighted_feature_diff)
)
volumes_removed.append(v_removed)
return np.min(volumes_removed)
def approx_objective(
features: np.ndarray, w_samples: np.ndarray
) -> float:
"""
Approximate the maximum volume removal objective.
:param features: the feature values of each query option
:param w_samples: w_samples of w used to approximate the
objective
:return: the value of the objective function, evaluated on the
given queries' features
"""
if features.shape[0] > features.shape[1]:
features = features.T
volumes_removed = []
for i in range(len(features)):
feature_diff = (
features[i] - features[1 - i]
) # 1 x feature_size
weighted_feature_diff = (
np.sum(np.dot(feature_diff, w_samples.T))
/ w_samples.shape[0]
) # 1 x 1 -- summed across w_samples
v_removed = 1.0 - np.minimum(
1.0, np.exp(self.beta_pref * weighted_feature_diff)
)
volumes_removed.append(v_removed)
return np.min(volumes_removed)
def rank_objective(features, w_samples) -> float:
"""
The ranking maximum volume removal objective function.
Note: This objective uses the Plackett-Luce model of
teacher behavior.
CANNOT BE USED WITH (incl_prev_QUERY AND NO DEMPREF).
:param features: a list containing the feature values of each
query
:param w_samples: samples of w, used to approximate the
objective
:return: the value of the objective function, evaluated on the
given queries' features
"""
# features: query_option_count x feature_size
# w_samples: n_samples x feature_size
exp_rewards = (
np.sum(np.dot(features, w_samples.T), axis=1)
/ w_samples.shape[0]
) # query_option_count x 1 -- summed across w_samples
volumes_removed = []
rankings = itertools.permutations(
list(range(self.num_queries))
) # iterating over all possible rankings
for rank in rankings:
exp_rewards_sorted = [None] * len(rank)
for i in range(len(rank)):
exp_rewards_sorted[rank[i]] = exp_rewards[i]
value, i = 1, 0
for i in range(len(rank) - 1):
value *= 1.0 / np.sum(
np.exp(
self.beta_pref
* (
np.array(exp_rewards_sorted[i:])
- exp_rewards_sorted[i]
)
)
)
volumes_removed.append(1 - value)
return np.min(volumes_removed)
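            # Restating the objectives above in one place: with option features
            # phi_1..phi_K and weight samples w, "pick_best" scores option i as
            #   1 - 1 / sum_j exp(beta_pref * mean_w[w . (phi_j - phi_i)]),
            # "approx" is a cheaper two-option variant, and "rank" replaces the
            # single choice with a Plackett-Luce model over full rankings; the
            # optimizer below maximizes the worst case (np.min) of these scores.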
# The following optimization is w.r.t. volume removal; the domain's
# optimization is w.r.t. the linear combination of weights and
# features; this difference is a trait of the DemPref codebase.
z = self.trajectory_length * self.domain.control_size
lower_input_bound = [
x[0] for x in self.domain.control_bounds
] * self.trajectory_length
upper_input_bound = [
x[1] for x in self.domain.control_bounds
] * self.trajectory_length
opt_res = opt.fmin_l_bfgs_b(
func,
x0=np.random.uniform(
low=self.num_new_queries * lower_input_bound,
high=self.num_new_queries * upper_input_bound,
size=(self.num_new_queries * z),
),
args=(self.domain, w_samples),
bounds=self.domain.control_bounds
* self.num_new_queries
* self.trajectory_length,
approx_grad=True,
)
query_options_controls = [
opt_res[0][i * z : (i + 1) * z]
for i in range(self.num_new_queries)
]
end = time.perf_counter()
print(f"Finished computing queries in {end - start}s")
# Note the domain was reset w/ appropriate seed before beginning
# this query session; domain.run(c) will thus reset to appropriate
# state:
raw_trajectories = [
self.domain.run(c) for c in query_options_controls
]
raw_phis = [
self.domain.features_from_trajectory(t)
for t in raw_trajectories
]
query_options_trajectories = [
Trajectory(raw_trajectories[i], raw_phis[i])
for i in range(len(raw_trajectories))
]
if self.include_previous_query and not blank_traj:
return [last_query_choice] + query_options_trajectories
else:
return query_options_trajectories
| [((146, 18, 146, 79), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((167, 17, 167, 48), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((169, 16, 169, 46), 'numpy.var', 'np.var', (), '', True, 'import numpy as np\n'), ((250, 16, 255, 9), 'inquire.interactions.feedback.Query', 'Query', (), '', False, 'from inquire.interactions.feedback import Query, Trajectory\n'), ((314, 13, 314, 36), 'pandas.read_csv', 'pd.read_csv', ({(314, 25, 314, 35): 'chosen_csv'}, {}), '(chosen_csv)', True, 'import pandas as pd\n'), ((168, 26, 168, 48), 'numpy.linalg.norm', 'np.linalg.norm', ({(168, 41, 168, 47): 'mean_w'}, {}), '(mean_w)', True, 'import numpy as np\n'), ((180, 12, 182, 13), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((291, 21, 291, 52), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((303, 20, 303, 30), 'pathlib.Path.cwd', 'Path.cwd', ({}, {}), '()', False, 'from pathlib import Path\n'), ((303, 33, 303, 59), 'pathlib.Path', 'Path', ({(303, 38, 303, 58): '"""../inquire/agents/"""'}, {}), "('../inquire/agents/')", False, 'from pathlib import Path\n'), ((311, 23, 311, 30), 'pathlib.Path', 'Path', ({(311, 28, 311, 29): 'c'}, {}), '(c)', False, 'from pathlib import Path\n'), ((326, 32, 326, 51), 'numpy.array', 'np.array', ({(326, 41, 326, 50): 'phi_demos'}, {}), '(phi_demos)', True, 'import numpy as np\n'), ((379, 29, 379, 61), 'numpy.zeros', 'np.zeros', ({(379, 38, 379, 60): '(1, self.dim_features)'}, {}), '((1, self.dim_features))', True, 'import numpy as np\n'), ((559, 20, 559, 30), 'pymc3.Model', 'pm.Model', ({}, {}), '()', True, 'import pymc3 as pm\n'), ((693, 20, 693, 39), 'time.perf_counter', 'time.perf_counter', ({}, {}), '()', False, 'import time\n'), ((862, 18, 862, 37), 'time.perf_counter', 'time.perf_counter', ({}, {}), '()', False, 'import time\n'), ((292, 30, 292, 52), 'numpy.linalg.norm', 'np.linalg.norm', ({(292, 45, 292, 51): 'mean_w'}, {}), '(mean_w)', True, 'import numpy as np\n'), ((305, 34, 305, 57), 'pathlib.Path.iterdir', 'Path.iterdir', ({(305, 47, 305, 56): 'data_path'}, {}), '(data_path)', False, 'from pathlib import Path\n'), ((307, 12, 307, 64), 'numpy.argwhere', 'np.argwhere', ({(307, 24, 307, 63): "[(f.suffix == '.csv') for f in all_files]"}, {}), "([(f.suffix == '.csv') for f in all_files])", True, 'import numpy as np\n'), ((415, 34, 415, 50), 'numpy.array', 'np.array', ({(415, 43, 415, 49): 'result'}, {}), '(result)', True, 'import numpy as np\n'), ((522, 29, 524, 17), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((536, 16, 536, 36), 'arviz.plot_trace', 'az.plot_trace', ({(536, 30, 536, 35): 'trace'}, {}), '(trace)', True, 'import arviz as az\n'), ((537, 16, 537, 26), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((539, 16, 539, 37), 'arviz.plot_energy', 'az.plot_energy', ({(539, 31, 539, 36): 'trace'}, {}), '(trace)', True, 'import arviz as az\n'), ((540, 16, 540, 26), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((542, 16, 542, 40), 'arviz.plot_posterior', 'az.plot_posterior', ({(542, 34, 542, 39): 'trace'}, {}), '(trace)', True, 'import arviz as az\n'), ((543, 16, 543, 26), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((562, 23, 568, 17), 'pymc3.Uniform', 'pm.Uniform', (), '', True, 'import pymc3 as pm\n'), ((708, 27, 708, 45), 'numpy.array', 'np.array', ({(708, 36, 708, 44): 
'controls'}, {}), '(controls)', True, 'import numpy as np\n'), ((713, 41, 715, 17), 'numpy.zeros', 'np.zeros', ({(714, 20, 714, 56): '(domain.w_dim, self.num_new_queries)'}, {}), '((domain.w_dim, self.num_new_queries))', True, 'import numpy as np\n'), ((759, 23, 759, 46), 'numpy.min', 'np.min', ({(759, 30, 759, 45): 'volumes_removed'}, {}), '(volumes_removed)', True, 'import numpy as np\n'), ((788, 23, 788, 46), 'numpy.min', 'np.min', ({(788, 30, 788, 45): 'volumes_removed'}, {}), '(volumes_removed)', True, 'import numpy as np\n'), ((833, 23, 833, 46), 'numpy.min', 'np.min', ({(833, 30, 833, 45): 'volumes_removed'}, {}), '(volumes_removed)', True, 'import numpy as np\n'), ((875, 16, 875, 60), 'inquire.interactions.feedback.Trajectory', 'Trajectory', ({(875, 27, 875, 46): 'raw_trajectories[i]', (875, 48, 875, 59): 'raw_phis[i]'}, {}), '(raw_trajectories[i], raw_phis[i])', False, 'from inquire.interactions.feedback import Query, Trajectory\n'), ((293, 19, 293, 46), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((525, 42, 525, 68), 'numpy.linalg.norm', 'np.linalg.norm', ({(525, 57, 525, 67): 'test_value'}, {}), '(test_value)', True, 'import numpy as np\n'), ((588, 28, 593, 21), 'pymc3.sample', 'pm.sample', (), '', True, 'import pymc3 as pm\n'), ((748, 35, 750, 21), 'numpy.array', 'np.array', ({(749, 24, 749, 59): '[(f - features[i]) for f in features]'}, {}), '([(f - features[i]) for f in features])', True, 'import numpy as np\n'), ((847, 19, 851, 17), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((551, 38, 551, 55), 'numpy.linalg.norm', 'np.linalg.norm', ({(551, 53, 551, 54): 'r'}, {}), '(r)', True, 'import numpy as np\n'), ((575, 24, 575, 46), 'pymc3.math.gt', 'pm.math.gt', ({(575, 35, 575, 40): 'w_sum', (575, 42, 575, 45): '1.0'}, {}), '(w_sum, 1.0)', True, 'import pymc3 as pm\n'), ((809, 27, 809, 56), 'numpy.dot', 'np.dot', ({(809, 34, 809, 42): 'features', (809, 44, 809, 55): 'w_samples.T'}, {}), '(features, w_samples.T)', True, 'import numpy as np\n'), ((573, 28, 573, 42), 'pymc3.math.sqr', 'pm.math.sqr', ({(573, 40, 573, 41): 'w'}, {}), '(w)', True, 'import pymc3 as pm\n'), ((752, 31, 752, 64), 'numpy.dot', 'np.dot', ({(752, 38, 752, 50): 'feature_diff', (752, 52, 752, 63): 'w_samples.T'}, {}), '(feature_diff, w_samples.T)', True, 'import numpy as np\n'), ((781, 31, 781, 64), 'numpy.dot', 'np.dot', ({(781, 38, 781, 50): 'feature_diff', (781, 52, 781, 63): 'w_samples.T'}, {}), '(feature_diff, w_samples.T)', True, 'import numpy as np\n'), ((785, 29, 785, 75), 'numpy.exp', 'np.exp', ({(785, 36, 785, 74): '(self.beta_pref * weighted_feature_diff)'}, {}), '(self.beta_pref * weighted_feature_diff)', True, 'import numpy as np\n'), ((456, 41, 456, 77), 'theano.tensor.dot', 'tt.dot', ({(456, 48, 456, 62): 'self.phi_demos', (456, 64, 456, 76): 'distribution'}, {}), '(self.phi_demos, distribution)', True, 'import theano.tensor as tt\n'), ((756, 24, 756, 70), 'numpy.exp', 'np.exp', ({(756, 31, 756, 69): '(self.beta_pref * weighted_feature_diff)'}, {}), '(self.beta_pref * weighted_feature_diff)', True, 'import numpy as np\n'), ((478, 41, 478, 77), 'theano.tensor.dot', 'tt.dot', ({(478, 48, 478, 62): 'self.phi_demos', (478, 64, 478, 76): 'distribution'}, {}), '(self.phi_demos, distribution)', True, 'import theano.tensor as tt\n'), ((451, 34, 451, 73), 'theano.tensor.dot', 'tt.dot', ({(451, 41, 451, 58): 'self.phi_prefs[i]', (451, 60, 451, 72): 'distribution'}, {}), '(self.phi_prefs[i], distribution)', True, 'import theano.tensor as tt\n'), 
((514, 30, 514, 66), 'theano.tensor.dot', 'tt.dot', ({(514, 37, 514, 51): 'self.phi_demos', (514, 53, 514, 65): 'distribution'}, {}), '(self.phi_demos, distribution)', True, 'import theano.tensor as tt\n'), ((827, 36, 827, 68), 'numpy.array', 'np.array', ({(827, 45, 827, 67): 'exp_rewards_sorted[i:]'}, {}), '(exp_rewards_sorted[i:])', True, 'import numpy as np\n'), ((469, 42, 471, 41), 'theano.tensor.dot', 'tt.dot', ({(470, 44, 470, 61): 'self.phi_prefs[i]', (470, 63, 470, 75): 'distribution'}, {}), '(self.phi_prefs[i], distribution)', True, 'import theano.tensor as tt\n'), ((494, 54, 500, 53), 'theano.tensor.dot', 'tt.dot', ({(495, 56, 498, 78): '(self.phi_prefs[i][j:, :] - self.phi_prefs[i][j])', (499, 56, 499, 68): 'distribution'}, {}), '(self.phi_prefs[i][j:, :] - self.phi_prefs[i][j], distribution)', True, 'import theano.tensor as tt\n')] |
mrucker/banditbenchmark | coba/learners/__init__.py | 0365291b3a0cf1d862d294e0386d0ccad3f360f1 | """This module contains all public learners and learner interfaces."""
from coba.learners.primitives import Learner, SafeLearner
from coba.learners.bandit import EpsilonBanditLearner, UcbBanditLearner, FixedLearner, RandomLearner
from coba.learners.corral import CorralLearner
from coba.learners.vowpal import VowpalMediator
from coba.learners.vowpal import VowpalArgsLearner, VowpalEpsilonLearner, VowpalSoftmaxLearner, VowpalBagLearner
from coba.learners.vowpal import VowpalCoverLearner, VowpalRegcbLearner, VowpalSquarecbLearner, VowpalOffPolicyLearner
from coba.learners.linucb import LinUCBLearner
__all__ = [
'Learner',
'SafeLearner',
'RandomLearner',
'FixedLearner',
'EpsilonBanditLearner',
'UcbBanditLearner',
'CorralLearner',
'LinUCBLearner',
'VowpalArgsLearner',
'VowpalEpsilonLearner',
'VowpalSoftmaxLearner',
'VowpalBagLearner',
'VowpalCoverLearner',
'VowpalRegcbLearner',
'VowpalSquarecbLearner',
'VowpalOffPolicyLearner',
'VowpalMediator'
] | [] |
Lenus254/personal_blog | virtual/lib/python3.8/site-packages/dns/zonefile.py | aac38e4b5372c86efa8e24db2e051fef8e5feef8 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
import re
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rdtypes.ANY.SOA
import dns.rrset
import dns.tokenizer
import dns.transaction
import dns.ttl
import dns.grange
class UnknownOrigin(dns.exception.DNSException):
"""Unknown origin"""
class CNAMEAndOtherData(dns.exception.DNSException):
"""A node has a CNAME and other data"""
def _check_cname_and_other_data(txn, name, rdataset):
rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)
node = txn.get_node(name)
if node is None:
# empty nodes are neutral.
return
node_kind = node.classify()
if node_kind == dns.node.NodeKind.CNAME and \
rdataset_kind == dns.node.NodeKind.REGULAR:
raise CNAMEAndOtherData('rdataset type is not compatible with a '
'CNAME node')
elif node_kind == dns.node.NodeKind.REGULAR and \
rdataset_kind == dns.node.NodeKind.CNAME:
raise CNAMEAndOtherData('CNAME rdataset is not compatible with a '
'regular data node')
# Otherwise at least one of the node and the rdataset is neutral, so
# adding the rdataset is ok
class Reader:
"""Read a DNS zone file into a transaction."""
def __init__(self, tok, rdclass, txn, allow_include=False,
allow_directives=True, force_name=None,
force_ttl=None, force_rdclass=None, force_rdtype=None,
default_ttl=None):
self.tok = tok
(self.zone_origin, self.relativize, _) = \
txn.manager.origin_information()
self.current_origin = self.zone_origin
self.last_ttl = 0
self.last_ttl_known = False
if force_ttl is not None:
default_ttl = force_ttl
if default_ttl is None:
self.default_ttl = 0
self.default_ttl_known = False
else:
self.default_ttl = default_ttl
self.default_ttl_known = True
self.last_name = self.current_origin
self.zone_rdclass = rdclass
self.txn = txn
self.saved_state = []
self.current_file = None
self.allow_include = allow_include
self.allow_directives = allow_directives
self.force_name = force_name
self.force_ttl = force_ttl
self.force_rdclass = force_rdclass
self.force_rdtype = force_rdtype
self.txn.check_put_rdataset(_check_cname_and_other_data)
def _eat_line(self):
while 1:
token = self.tok.get()
if token.is_eol_or_eof():
break
def _get_identifier(self):
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
return token
def _rr_line(self):
"""Process one line from a DNS zone file."""
token = None
# Name
if self.force_name is not None:
name = self.force_name
else:
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get(want_leading=True)
if not token.is_whitespace():
self.last_name = self.tok.as_name(token, self.current_origin)
else:
token = self.tok.get()
if token.is_eol_or_eof():
# treat leading WS followed by EOL/EOF as if they were EOL/EOF.
return
self.tok.unget(token)
name = self.last_name
if not name.is_subdomain(self.zone_origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone_origin)
# TTL
if self.force_ttl is not None:
ttl = self.force_ttl
self.last_ttl = ttl
self.last_ttl_known = True
else:
token = self._get_identifier()
ttl = None
try:
ttl = dns.ttl.from_text(token.value)
self.last_ttl = ttl
self.last_ttl_known = True
token = None
except dns.ttl.BadTTL:
if self.default_ttl_known:
ttl = self.default_ttl
elif self.last_ttl_known:
ttl = self.last_ttl
self.tok.unget(token)
# Class
if self.force_rdclass is not None:
rdclass = self.force_rdclass
else:
token = self._get_identifier()
try:
rdclass = dns.rdataclass.from_text(token.value)
except dns.exception.SyntaxError:
raise
except Exception:
rdclass = self.zone_rdclass
self.tok.unget(token)
if rdclass != self.zone_rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
if self.force_rdtype is not None:
rdtype = self.force_rdtype
else:
token = self._get_identifier()
try:
rdtype = dns.rdatatype.from_text(token.value)
except Exception:
raise dns.exception.SyntaxError(
"unknown rdatatype '%s'" % token.value)
try:
rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
self.current_origin, self.relativize,
self.zone_origin)
except dns.exception.SyntaxError:
# Catch and reraise.
raise
except Exception:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError(
"caught exception {}: {}".format(str(ty), str(va)))
if not self.default_ttl_known and rdtype == dns.rdatatype.SOA:
# The pre-RFC2308 and pre-BIND9 behavior inherits the zone default
# TTL from the SOA minttl if no $TTL statement is present before the
# SOA is parsed.
self.default_ttl = rd.minimum
self.default_ttl_known = True
if ttl is None:
# if we didn't have a TTL on the SOA, set it!
ttl = rd.minimum
# TTL check. We had to wait until now to do this as the SOA RR's
# own TTL can be inferred from its minimum.
if ttl is None:
raise dns.exception.SyntaxError("Missing default TTL value")
self.txn.add(name, ttl, rd)
def _parse_modify(self, side):
# Here we catch everything in '{' '}' in a group so we can replace it
# with ''.
is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$")
is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$")
# Sometimes there are modifiers in the hostname. These come after
# the dollar sign. They are in the form: ${offset[,width[,base]]}.
# Make names
g1 = is_generate1.match(side)
if g1:
mod, sign, offset, width, base = g1.groups()
if sign == '':
sign = '+'
g2 = is_generate2.match(side)
if g2:
mod, sign, offset = g2.groups()
if sign == '':
sign = '+'
width = 0
base = 'd'
g3 = is_generate3.match(side)
if g3:
mod, sign, offset, width = g3.groups()
if sign == '':
sign = '+'
base = 'd'
if not (g1 or g2 or g3):
mod = ''
sign = '+'
offset = 0
width = 0
base = 'd'
if base != 'd':
raise NotImplementedError()
return mod, sign, offset, width, base
def _generate_line(self):
# range lhs [ttl] [class] type rhs [ comment ]
"""Process one line containing the GENERATE statement from a DNS
zone file."""
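        # Illustrative example of such a line (not from a real zone, and
        # assuming a $TTL was set earlier):
        #     $GENERATE 1-16 host-$ A 10.0.0.$
        # which expands to host-1 ... host-16 with addresses 10.0.0.1 ... 10.0.0.16.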
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get()
# Range (required)
try:
start, stop, step = dns.grange.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError
# lhs (required)
try:
lhs = token.value
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError
# TTL
try:
ttl = dns.ttl.from_text(token.value)
self.last_ttl = ttl
self.last_ttl_known = True
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.ttl.BadTTL:
if not (self.last_ttl_known or self.default_ttl_known):
raise dns.exception.SyntaxError("Missing default TTL value")
if self.default_ttl_known:
ttl = self.default_ttl
elif self.last_ttl_known:
ttl = self.last_ttl
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = self.zone_rdclass
if rdclass != self.zone_rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
try:
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
token.value)
# rhs (required)
rhs = token.value
# The code currently only supports base 'd', so the last value
# in the tuple _parse_modify returns is ignored
lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs)
rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs)
for i in range(start, stop + 1, step):
# +1 because bind is inclusive and python is exclusive
if lsign == '+':
lindex = i + int(loffset)
elif lsign == '-':
lindex = i - int(loffset)
if rsign == '-':
rindex = i - int(roffset)
elif rsign == '+':
rindex = i + int(roffset)
lzfindex = str(lindex).zfill(int(lwidth))
rzfindex = str(rindex).zfill(int(rwidth))
name = lhs.replace('$%s' % (lmod), lzfindex)
rdata = rhs.replace('$%s' % (rmod), rzfindex)
self.last_name = dns.name.from_text(name, self.current_origin,
self.tok.idna_codec)
name = self.last_name
if not name.is_subdomain(self.zone_origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone_origin)
try:
rd = dns.rdata.from_text(rdclass, rdtype, rdata,
self.current_origin, self.relativize,
self.zone_origin)
except dns.exception.SyntaxError:
# Catch and reraise.
raise
except Exception:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError("caught exception %s: %s" %
(str(ty), str(va)))
self.txn.add(name, ttl, rd)
def read(self):
"""Read a DNS zone file and build a zone object.
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
"""
try:
while 1:
token = self.tok.get(True, True)
if token.is_eof():
if self.current_file is not None:
self.current_file.close()
if len(self.saved_state) > 0:
(self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.last_ttl,
self.last_ttl_known,
self.default_ttl,
self.default_ttl_known) = self.saved_state.pop(-1)
continue
break
elif token.is_eol():
continue
elif token.is_comment():
self.tok.get_eol()
continue
elif token.value[0] == '$' and self.allow_directives:
c = token.value.upper()
if c == '$TTL':
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError("bad $TTL")
self.default_ttl = dns.ttl.from_text(token.value)
self.default_ttl_known = True
self.tok.get_eol()
elif c == '$ORIGIN':
self.current_origin = self.tok.get_name()
self.tok.get_eol()
if self.zone_origin is None:
self.zone_origin = self.current_origin
self.txn._set_origin(self.current_origin)
elif c == '$INCLUDE' and self.allow_include:
token = self.tok.get()
filename = token.value
token = self.tok.get()
if token.is_identifier():
new_origin =\
dns.name.from_text(token.value,
self.current_origin,
self.tok.idna_codec)
self.tok.get_eol()
elif not token.is_eol_or_eof():
raise dns.exception.SyntaxError(
"bad origin in $INCLUDE")
else:
new_origin = self.current_origin
self.saved_state.append((self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.last_ttl,
self.last_ttl_known,
self.default_ttl,
self.default_ttl_known))
self.current_file = open(filename, 'r')
self.tok = dns.tokenizer.Tokenizer(self.current_file,
filename)
self.current_origin = new_origin
elif c == '$GENERATE':
self._generate_line()
else:
raise dns.exception.SyntaxError(
"Unknown zone file directive '" + c + "'")
continue
self.tok.unget(token)
self._rr_line()
except dns.exception.SyntaxError as detail:
(filename, line_number) = self.tok.where()
if detail is None:
detail = "syntax error"
ex = dns.exception.SyntaxError(
"%s:%d: %s" % (filename, line_number, detail))
tb = sys.exc_info()[2]
raise ex.with_traceback(tb) from None
class RRsetsReaderTransaction(dns.transaction.Transaction):
def __init__(self, manager, replacement, read_only):
assert not read_only
super().__init__(manager, replacement, read_only)
self.rdatasets = {}
def _get_rdataset(self, name, rdtype, covers):
return self.rdatasets.get((name, rdtype, covers))
def _get_node(self, name):
rdatasets = []
for (rdataset_name, _, _), rdataset in self.rdatasets.items():
if name == rdataset_name:
rdatasets.append(rdataset)
if len(rdatasets) == 0:
return None
node = dns.node.Node()
node.rdatasets = rdatasets
return node
def _put_rdataset(self, name, rdataset):
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
# First remove any changes involving the name
remove = []
for key in self.rdatasets:
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
def _delete_rdataset(self, name, rdtype, covers):
try:
del self.rdatasets[(name, rdtype, covers)]
except KeyError:
pass
def _name_exists(self, name):
for (n, _, _) in self.rdatasets:
if n == name:
return True
return False
def _changed(self):
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit and self._changed():
rrsets = []
for (name, _, _), rdataset in self.rdatasets.items():
rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype,
rdataset.covers)
rrset.update(rdataset)
rrsets.append(rrset)
self.manager.set_rrsets(rrsets)
def _set_origin(self, origin):
pass
class RRSetsReaderManager(dns.transaction.TransactionManager):
def __init__(self, origin=dns.name.root, relativize=False,
rdclass=dns.rdataclass.IN):
self.origin = origin
self.relativize = relativize
self.rdclass = rdclass
self.rrsets = []
def writer(self, replacement=False):
assert replacement is True
return RRsetsReaderTransaction(self, True, False)
def get_class(self):
return self.rdclass
def origin_information(self):
if self.relativize:
effective = dns.name.empty
else:
effective = self.origin
return (self.origin, self.relativize, effective)
def set_rrsets(self, rrsets):
self.rrsets = rrsets
def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN,
default_rdclass=dns.rdataclass.IN,
rdtype=None, default_ttl=None, idna_codec=None,
origin=dns.name.root, relativize=False):
"""Read one or more rrsets from the specified text, possibly subject
to restrictions.
*text*, a file object or a string, is the input to process.
*name*, a string, ``dns.name.Name``, or ``None``, is the owner name of
the rrset. If not ``None``, then the owner name is "forced", and the
input must not specify an owner name. If ``None``, then any owner names
are allowed and must be present in the input.
    *ttl*, an ``int``, string, or ``None``. If not ``None``, then the TTL is
forced to be the specified value and the input must not specify a TTL.
If ``None``, then a TTL may be specified in the input. If it is not
specified, then the *default_ttl* will be used.
*rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If
not ``None``, then the class is forced to the specified value, and the
input must not specify a class. If ``None``, then the input may specify
a class that matches *default_rdclass*. Note that it is not possible to
return rrsets with differing classes; specifying ``None`` for the class
simply allows the user to optionally type a class as that may be convenient
when cutting and pasting.
*default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class
of the returned rrsets.
*rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not
``None``, then the type is forced to the specified value, and the
input must not specify a type. If ``None``, then a type must be present
for each RR.
*default_ttl*, an ``int``, string, or ``None``. If not ``None``, then if
the TTL is not forced and is not specified, then this value will be used.
    If ``None`` and the TTL is not forced, an error will occur if the TTL
    is not specified.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used. Note that codecs only apply to the owner name; dnspython does
not do IDNA for names in rdata, as there is no IDNA zonefile format.
*origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any
relative names in the input, and also the origin to relativize to if
*relativize* is ``True``.
*relativize*, a bool. If ``True``, names are relativized to the *origin*;
if ``False`` then any relative names in the input are made absolute by
appending the *origin*.
"""
if isinstance(origin, str):
origin = dns.name.from_text(origin, dns.name.root, idna_codec)
if isinstance(name, str):
name = dns.name.from_text(name, origin, idna_codec)
if isinstance(ttl, str):
ttl = dns.ttl.from_text(ttl)
if isinstance(default_ttl, str):
default_ttl = dns.ttl.from_text(default_ttl)
if rdclass is not None:
rdclass = dns.rdataclass.RdataClass.make(rdclass)
default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)
if rdtype is not None:
rdtype = dns.rdatatype.RdataType.make(rdtype)
manager = RRSetsReaderManager(origin, relativize, default_rdclass)
with manager.writer(True) as txn:
tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec)
reader = Reader(tok, default_rdclass, txn, allow_directives=False,
force_name=name, force_ttl=ttl, force_rdclass=rdclass,
force_rdtype=rdtype, default_ttl=default_ttl)
reader.read()
return manager.rrsets
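def _example_read_rrsets():
    """Hedged usage sketch, not part of dnspython itself: the zone text and
    origin below are made-up values.  With the default (forced) IN class the
    input must not name a class, and each record carries its own TTL."""
    text = 'www 3600 A 192.0.2.1\nwww 3600 AAAA 2001:db8::1\n'
    return read_rrsets(text, origin='example.', relativize=True)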
| [((219, 23, 219, 72), 're.compile', 're.compile', ({(219, 34, 219, 71): '"""^.*\\\\$({(\\\\+|-?)(\\\\d+),(\\\\d+),(.)}).*$"""'}, {}), "('^.*\\\\$({(\\\\+|-?)(\\\\d+),(\\\\d+),(.)}).*$')", False, 'import re\n'), ((220, 23, 220, 62), 're.compile', 're.compile', ({(220, 34, 220, 61): '"""^.*\\\\$({(\\\\+|-?)(\\\\d+)}).*$"""'}, {}), "('^.*\\\\$({(\\\\+|-?)(\\\\d+)}).*$')", False, 'import re\n'), ((221, 23, 221, 68), 're.compile', 're.compile', ({(221, 34, 221, 67): '"""^.*\\\\$({(\\\\+|-?)(\\\\d+),(\\\\d+)}).*$"""'}, {}), "('^.*\\\\$({(\\\\+|-?)(\\\\d+),(\\\\d+)}).*$')", False, 'import re\n'), ((195, 23, 195, 37), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((458, 17, 458, 31), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((367, 27, 367, 41), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n')] |
fossabot/swift-1 | swift/common/daemon.py | 63fc013b8b96484cede0e9901ad54676b8c93298 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import sys
import time
import signal
from re import sub
import eventlet.debug
from eventlet.hubs import use_hub
from swift.common import utils
class Daemon(object):
"""
Daemon base class
A daemon has a run method that accepts a ``once`` kwarg and will dispatch
to :meth:`run_once` or :meth:`run_forever`.
A subclass of Daemon must implement :meth:`run_once` and
:meth:`run_forever`.
A subclass of Daemon may override :meth:`get_worker_args` to dispatch
arguments to individual child process workers and :meth:`is_healthy` to
perform context specific periodic wellness checks which can reset worker
arguments.
    Implementations of Daemon do not know *how* to daemonize, or execute
    multiple daemonized workers; they simply provide the behavior of the daemon
    and context-specific knowledge about how workers should be started.
"""
def __init__(self, conf):
self.conf = conf
self.logger = utils.get_logger(conf, log_route='daemon')
def run_once(self, *args, **kwargs):
"""Override this to run the script once"""
raise NotImplementedError('run_once not implemented')
def run_forever(self, *args, **kwargs):
"""Override this to run forever"""
raise NotImplementedError('run_forever not implemented')
def run(self, once=False, **kwargs):
if once:
self.run_once(**kwargs)
else:
self.run_forever(**kwargs)
def post_multiprocess_run(self):
"""
Override this to do something after running using multiple worker
processes. This method is called in the parent process.
This is probably only useful for run-once mode since there is no
"after running" in run-forever mode.
"""
pass
def get_worker_args(self, once=False, **kwargs):
"""
For each worker yield a (possibly empty) dict of kwargs to pass along
        to the daemon's :meth:`run` method after fork. The number of elements
        returned from this method determines the number of processes
        created.
        If the returned iterable is empty, the Strategy will fall back to the
        run-inline strategy.
:param once: False if the worker(s) will be daemonized, True if the
worker(s) will be run once
:param kwargs: plumbed through via command line argparser
:returns: an iterable of dicts, each element represents the kwargs to
be passed to a single worker's :meth:`run` method after fork.
"""
return []
def is_healthy(self):
"""
This method is called very frequently on the instance of the daemon
held by the parent process. If it returns False, all child workers are
terminated, and new workers will be created.
:returns: a boolean, True only if all workers should continue to run
"""
return True
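# Illustrative sketch, not part of Swift: the smallest useful Daemon subclass
# only implements run_once()/run_forever().  Every name below other than
# Daemon itself is an assumption made for this example; it would normally be
# launched through run_daemon(ExampleSweepDaemon, conf_file).
class ExampleSweepDaemon(Daemon):
    def run_once(self, *args, **kwargs):
        self.logger.info('example sweep')
    def run_forever(self, *args, **kwargs):
        while True:
            self.run_once(*args, **kwargs)
            time.sleep(float(self.conf.get('interval', 30)))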
class DaemonStrategy(object):
"""
This is the execution strategy for using subclasses of Daemon. The default
behavior is to invoke the daemon's :meth:`Daemon.run` method from within
the parent process. When the :meth:`Daemon.run` method returns the parent
process will exit.
However, if the Daemon returns a non-empty iterable from
:meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will
be invoked in child processes, with the arguments provided from the parent
process's instance of the daemon. If a child process exits it will be
restarted with the same options, unless it was executed in once mode.
:param daemon: an instance of a :class:`Daemon` (has a `run` method)
:param logger: a logger instance
"""
def __init__(self, daemon, logger):
self.daemon = daemon
self.logger = logger
self.running = False
# only used by multi-worker strategy
self.options_by_pid = {}
self.unspawned_worker_options = []
def setup(self, **kwargs):
utils.validate_configuration()
utils.drop_privileges(self.daemon.conf.get('user', 'swift'))
utils.clean_up_daemon_hygiene()
utils.capture_stdio(self.logger, **kwargs)
def kill_children(*args):
self.running = False
self.logger.info('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
os.killpg(0, signal.SIGTERM)
os._exit(0)
signal.signal(signal.SIGTERM, kill_children)
self.running = True
def _run_inline(self, once=False, **kwargs):
"""Run the daemon"""
self.daemon.run(once=once, **kwargs)
def run(self, once=False, **kwargs):
"""Daemonize and execute our strategy"""
self.setup(**kwargs)
try:
self._run(once=once, **kwargs)
except KeyboardInterrupt:
self.logger.notice('User quit')
finally:
self.cleanup()
self.running = False
def _fork(self, once, **kwargs):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.daemon.run(once, **kwargs)
self.logger.debug('Forked worker %s finished', os.getpid())
# do not return from this stack, nor execute any finally blocks
os._exit(0)
else:
self.register_worker_start(pid, kwargs)
return pid
def iter_unspawned_workers(self):
while True:
try:
per_worker_options = self.unspawned_worker_options.pop()
except IndexError:
return
yield per_worker_options
def spawned_pids(self):
return list(self.options_by_pid.keys())
def register_worker_start(self, pid, per_worker_options):
self.logger.debug('Spawned worker %s with %r', pid, per_worker_options)
self.options_by_pid[pid] = per_worker_options
def register_worker_exit(self, pid):
self.unspawned_worker_options.append(self.options_by_pid.pop(pid))
def ask_daemon_to_prepare_workers(self, once, **kwargs):
self.unspawned_worker_options = list(
self.daemon.get_worker_args(once=once, **kwargs))
def abort_workers_if_daemon_would_like(self):
if not self.daemon.is_healthy():
self.logger.debug(
'Daemon needs to change options, aborting workers')
self.cleanup()
return True
return False
def check_on_all_running_workers(self):
for p in self.spawned_pids():
try:
pid, status = os.waitpid(p, os.WNOHANG)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
self.logger.notice('Worker %s died', p)
else:
if pid == 0:
# child still running
continue
self.logger.debug('Worker %s exited', p)
self.register_worker_exit(p)
def _run(self, once, **kwargs):
self.ask_daemon_to_prepare_workers(once, **kwargs)
if not self.unspawned_worker_options:
return self._run_inline(once, **kwargs)
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
while self.running:
if self.abort_workers_if_daemon_would_like():
self.ask_daemon_to_prepare_workers(once, **kwargs)
self.check_on_all_running_workers()
if not once:
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
else:
if not self.spawned_pids():
self.logger.notice('Finished %s', os.getpid())
break
time.sleep(0.1)
self.daemon.post_multiprocess_run()
return 0
def cleanup(self):
for p in self.spawned_pids():
try:
os.kill(p, signal.SIGTERM)
except OSError as err:
if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD):
raise
self.register_worker_exit(p)
self.logger.debug('Cleaned up worker %s', p)
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
"""
Loads settings from conf, then instantiates daemon ``klass`` and runs the
daemon with the specified ``once`` kwarg. The section_name will be derived
from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
object-replicator).
:param klass: Class to instantiate, subclass of :class:`Daemon`
:param conf_file: Path to configuration file
:param section_name: Section name from conf file to load config from
:param once: Passed to daemon :meth:`Daemon.run` method
"""
# very often the config section_name is based on the class name
# the None singleton will be passed through to readconf as is
if section_name == '':
section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
klass.__name__).lower()
try:
conf = utils.readconf(conf_file, section_name,
log_name=kwargs.get('log_name'))
except (ValueError, IOError) as e:
# The message will be printed to stderr
# and results in an exit code of 1.
sys.exit(e)
use_hub(utils.get_hub())
# once on command line (i.e. daemonize=false) will over-ride config
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
# pre-configure logger
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = utils.get_logger(conf, conf.get('log_name', section_name),
log_to_console=kwargs.pop('verbose', False),
log_route=section_name)
# optional nice/ionice priority scheduling
utils.modify_priority(conf, logger)
# disable fallocate if desired
if utils.config_true_value(conf.get('disable_fallocate', 'no')):
utils.disable_fallocate()
# set utils.FALLOCATE_RESERVE if desired
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
# By default, disable eventlet printing stacktraces
eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
# some platforms. This locks in reported times to UTC.
os.environ['TZ'] = 'UTC+0'
time.tzset()
logger.notice('Starting %s', os.getpid())
try:
DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
except KeyboardInterrupt:
logger.info('User quit')
logger.notice('Exited %s', os.getpid())
| [((296, 4, 296, 39), 'swift.common.utils.modify_priority', 'utils.modify_priority', ({(296, 26, 296, 30): 'conf', (296, 32, 296, 38): 'logger'}, {}), '(conf, logger)', False, 'from swift.common import utils\n'), ((312, 4, 312, 16), 'time.tzset', 'time.tzset', ({}, {}), '()', False, 'import time\n'), ((51, 22, 51, 64), 'swift.common.utils.get_logger', 'utils.get_logger', (), '', False, 'from swift.common import utils\n'), ((133, 8, 133, 38), 'swift.common.utils.validate_configuration', 'utils.validate_configuration', ({}, {}), '()', False, 'from swift.common import utils\n'), ((135, 8, 135, 39), 'swift.common.utils.clean_up_daemon_hygiene', 'utils.clean_up_daemon_hygiene', ({}, {}), '()', False, 'from swift.common import utils\n'), ((136, 8, 136, 50), 'swift.common.utils.capture_stdio', 'utils.capture_stdio', ({(136, 28, 136, 39): 'self.logger'}, {}), '(self.logger, **kwargs)', False, 'from swift.common import utils\n'), ((145, 8, 145, 52), 'signal.signal', 'signal.signal', ({(145, 22, 145, 36): 'signal.SIGTERM', (145, 38, 145, 51): 'kill_children'}, {}), '(signal.SIGTERM, kill_children)', False, 'import signal\n'), ((164, 14, 164, 23), 'os.fork', 'os.fork', ({}, {}), '()', False, 'import os\n'), ((282, 12, 282, 27), 'swift.common.utils.get_hub', 'utils.get_hub', ({}, {}), '()', False, 'from swift.common import utils\n'), ((300, 8, 300, 33), 'swift.common.utils.disable_fallocate', 'utils.disable_fallocate', ({}, {}), '()', False, 'from swift.common import utils\n'), ((314, 33, 314, 44), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((319, 31, 319, 42), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((141, 12, 141, 57), 'signal.signal', 'signal.signal', ({(141, 26, 141, 40): 'signal.SIGTERM', (141, 42, 141, 56): 'signal.SIG_IGN'}, {}), '(signal.SIGTERM, signal.SIG_IGN)', False, 'import signal\n'), ((142, 12, 142, 40), 'os.killpg', 'os.killpg', ({(142, 22, 142, 23): '(0)', (142, 25, 142, 39): 'signal.SIGTERM'}, {}), '(0, signal.SIGTERM)', False, 'import os\n'), ((143, 12, 143, 23), 'os._exit', 'os._exit', ({(143, 21, 143, 22): '(0)'}, {}), '(0)', False, 'import os\n'), ((166, 12, 166, 56), 'signal.signal', 'signal.signal', ({(166, 26, 166, 39): 'signal.SIGHUP', (166, 41, 166, 55): 'signal.SIG_DFL'}, {}), '(signal.SIGHUP, signal.SIG_DFL)', False, 'import signal\n'), ((167, 12, 167, 57), 'signal.signal', 'signal.signal', ({(167, 26, 167, 40): 'signal.SIGTERM', (167, 42, 167, 56): 'signal.SIG_DFL'}, {}), '(signal.SIGTERM, signal.SIG_DFL)', False, 'import signal\n'), ((173, 12, 173, 23), 'os._exit', 'os._exit', ({(173, 21, 173, 22): '(0)'}, {}), '(0)', False, 'import os\n'), ((242, 12, 242, 27), 'time.sleep', 'time.sleep', ({(242, 23, 242, 26): '(0.1)'}, {}), '(0.1)', False, 'import time\n'), ((280, 8, 280, 19), 'sys.exit', 'sys.exit', ({(280, 17, 280, 18): 'e'}, {}), '(e)', False, 'import sys\n'), ((171, 59, 171, 70), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((211, 30, 211, 55), 'os.waitpid', 'os.waitpid', ({(211, 41, 211, 42): 'p', (211, 44, 211, 54): 'os.WNOHANG'}, {}), '(p, os.WNOHANG)', False, 'import os\n'), ((249, 16, 249, 42), 'os.kill', 'os.kill', ({(249, 24, 249, 25): 'p', (249, 27, 249, 41): 'signal.SIGTERM'}, {}), '(p, signal.SIGTERM)', False, 'import os\n'), ((272, 23, 273, 42), 're.sub', 'sub', ({(272, 27, 272, 44): '"""([a-z])([A-Z])"""', (272, 46, 272, 54): '"""\\\\1-\\\\2"""', (273, 27, 273, 41): 'klass.__name__'}, {}), "('([a-z])([A-Z])', '\\\\1-\\\\2', klass.__name__)", False, 'from re import sub\n'), ((240, 54, 
240, 65), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n')] |
Bhaskers-Blu-Org1/multicloud-incident-response-navigator | backend/resource_files_sample.py | e6ba6322fdcc533b6ed14abb4681470a6bb6bd85 | import resource_files
resources = resource_files.ResourceFiles()
# sample use case of getting yamls
print(resources.get_yaml("Pod", "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf", "default", "mycluster"))
# sample use case of getting events
print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a'))
# sample use case of getting describe info
print(resources.get_logs('mycluster', 'default', "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf"))
| [((3, 12, 3, 42), 'resource_files.ResourceFiles', 'resource_files.ResourceFiles', ({}, {}), '()', False, 'import resource_files\n')] |
aroraenterprise/projecteos | backend/api/v1/auth_module/auth_api.py | e1fb0438af8cb59b77792523c6616c480b23a6f8 | """
Project: flask-rest
Author: Saj Arora
Description: Handle auth endpoints such as auth/signup, auth/login
"""
from api.v1 import make_json_ok_response, SageController, SageMethod
from api.v1.fundamentals import helper
from .auth_controller import AuthController
def sage_auth_signup_function(self, resource, **kwargs):
_UserModel = resource.get_account_model()
args = helper.parse_args_for_model(_UserModel)
user = _UserModel(**args) # user has been created
user.put() # save to get a key for the user
result, params = AuthController.create_unique_for_user(user.key)
if not result: # not successful
user.key.delete()
raise params # this holds the error message
else:
return params # this holds accesskey and refresh token
def sage_auth_authenticate_function(self, resource, **kwargs):
result, params = AuthController.authenticate_client()
if not result: # not successful
raise params # this holds the error message
else:
return params # this holds the refresh token and the access token
auth_controller = {
'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False),
'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False)
} | [((13, 11, 13, 50), 'api.v1.fundamentals.helper.parse_args_for_model', 'helper.parse_args_for_model', ({(13, 39, 13, 49): '_UserModel'}, {}), '(_UserModel)', False, 'from api.v1.fundamentals import helper\n'), ((34, 14, 34, 92), 'api.v1.SageController', 'SageController', (), '', False, 'from api.v1 import make_json_ok_response, SageController, SageMethod\n'), ((35, 20, 35, 104), 'api.v1.SageController', 'SageController', (), '', False, 'from api.v1 import make_json_ok_response, SageController, SageMethod\n')] |
AngsarM/QuanGuru | tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py | 5db6105f843bbc78c2d5b1547e32d494fbe10b8d | import random as rn
import numpy as np
# open system dynamics of a qubit and compare numerical results with the analytical calculations
# NOTE these are also TUTORIALS of the library, so see the Tutorials for what these are doing and analytical
# calculations.
# currently includes 2 cases: (i) decay only, and (ii) unitary evolution by calling Liouville method without giving
# any collapse operators. For now, only looks at excited state populations
# TODO: this is an unfinished test; the two tests below are identical and do not actually test open-system dynamics yet.
decayRateSM = rn.random()
excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t)
populations = {'excitedAnalytical':[], 'excitedNumerical':[]}
# this is used as the calculate attribute of the qubit, and the singleQubit fixture evolve method calls this at every
# step of the evolution. It stores both numerical and analytical excited state populations into the dictionary above.
def singleQubitDecayCalculate(qub, state, i):
populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize))
populations['excitedNumerical'].append(state[0, 0])
def test_qubitUnitaryEvolutionFromLiouville(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
def test_qubitDecay(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
| [((13, 14, 13, 25), 'random.random', 'rn.random', ({}, {}), '()', True, 'import random as rn\n'), ((15, 34, 15, 78), 'numpy.exp', 'np.exp', ({(15, 41, 15, 77): '(-(1e-05 * (decayRateSM + 1) * 2 + 1.0j) * 50 * t)'}, {}), '(-(1e-05 * (decayRateSM + 1) * 2 + 1.0j) * 50 * t)', True, 'import numpy as np\n')] |
Mohamed-ShehabEldin/QuGraphy | QuGraphy/state.py | c43fe7128f91e7bd383393f5ff16ff613077e8d7 | #this file will contain function that related to vector state
from .density import * #we may use some functions from them and dependencies
def row2col(vec):
if np.ndim(vec)==1:
col=[]
for element in vec:
col.append([element])
return col
else:
return vec
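# quick illustrative check (not from the original file):
# row2col([1, 0]) returns [[1], [0]]; an input that is already a column is returned unchanged.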
def check_state(state):
    state = row2col(state)  # make sure we are working with a column vector
if np.shape(state)[1]>1:
raise Exception("invalid state, not a vector!")
if schmidt_inner(state,state) !=1:
raise Exception("invalid state, not normalized!") | [] |
GeoscienceAustralia/uncoverml | uncoverml/metadata_profiler.py | 672914377afa4ad1c069fcd4845bc45f80132e36 | #! /usr/bin/env python
"""
Description:
Gather Metadata for the uncover-ml prediction output results:
Reference: email 2019-05-24
Overview
Creator: (person who generated the model)
Model;
Name:
Type and date:
Algorithm:
Extent: Lat/long - location on Australia map?
SB Notes: None of the above is required as this information will be captured in the yaml file.
Model inputs:
1. Covariates - list (in full)
2. Targets: path to shapefile: csv file
    SB Notes: Only the covariate list file. Targets and path to shapefile are not required as these are available in the yaml file. Maybe the full path to the shapefile has some merit as one can specify a partial path.
Model performance
JSON file (in full)
SB Notes: Yes
Model outputs
1. Prediction grid including path
2. Quantiles Q5; Q95
3. Variance:
4. Entropy:
5. Feature rank file
6. Raw covariates file (target value - covariate value)
7. Optimisation output
8. Others ??
SB Notes: Not required as these are model dependent, and the metadata will be contained in each of the output geotif file.
Model parameters:
1. YAML file (in full)
2. .SH file (in full)
SB Notes: The .sh file is not required. YAML file is read as a python dictionary in uncoverml which can be dumped in the metadata.
CreationDate: 31/05/19
Developer: [email protected]
Revision History:
LastUpdate: 31/05/19 FZ
LastUpdate: dd/mm/yyyy Who Optional description
"""
# import section
import os
import sys
import json
import pickle
import datetime
import getpass
import socket
from ppretty import ppretty
import uncoverml
class MetadataSummary():
"""
Summary Description of the ML prediction output
"""
def __init__(self, model, config):
self.model = model
self.description = "Metadata for the ML results"
username = getpass.getuser()
hostname = socket.gethostname()
self.creator = username
self.computename = hostname
self.datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.version = uncoverml.__version__
model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True,
show_address=False, str_length=50)
self.config = config
self.name = self.config.name # 'demo_regression'
self.algorithm = self.config.algorithm # 'svr'
self.extent = ((-10, 100),(-40, 140))
if config.cross_validate and os.path.exists(config.crossval_scores_file):
with open(config.crossval_scores_file) as sf:
self.model_performance_metrics = json.load(sf)
else:
self.model_performance_metrics = None
def write_metadata(self, out_filename):
"""
        Write the metadata for this prediction result into a human-readable txt file,
        in order to make the ML results traceable and reproducible (provenance).
"""
with open(out_filename, 'w') as outf:
outf.write("# Metadata Profile for the Prediction Results")
outf.write("\n\n############ Software Environment ###########\n\n")
outf.write("Creator = %s \n"%self.creator)
outf.write("Computer = %s \n"%self.computename)
outf.write("ML Algorithm = %s \n"%self.algorithm)
outf.write("Version = %s\n"%self.version)
outf.write("Datetime = %s \n"%self.datetime)
outf.write("\n\n############ Performance Matrics ###########\n\n")
if self.model_performance_metrics:
for keys, values in self.model_performance_metrics.items():
outf.write("%s = %s\n"%(keys, values))
outf.write("\n\n############ Configuration ###########\n\n")
conf_str = ppretty(self.config, indent=' ', width=200, seq_length=200,
show_protected=True, show_static=True, show_properties=True,
show_address=False, str_length=200)
outf.write(conf_str)
outf.write("\n\n############ Model ###########\n\n")
model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True,
show_address=False, str_length=50)
outf.write(model_str)
outf.write("\n\n############ The End of Metadata ###########\n\n")
return out_filename
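def write_example_metadata(model, config, out_path="metadata_profile.txt"):
    """Illustrative sketch, not part of the original module: `model` and
    `config` are assumed to come from the uncover-ml pipeline; this simply
    wires them into MetadataSummary and writes the profile to `out_path`."""
    summary = MetadataSummary(model, config)
    return summary.write_metadata(out_path)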
| [((75, 19, 75, 36), 'getpass.getuser', 'getpass.getuser', ({}, {}), '()', False, 'import getpass\n'), ((76, 19, 76, 39), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n'), ((83, 20, 84, 62), 'ppretty.ppretty', 'ppretty', (), '', False, 'from ppretty import ppretty\n'), ((92, 37, 92, 80), 'os.path.exists', 'os.path.exists', ({(92, 52, 92, 79): 'config.crossval_scores_file'}, {}), '(config.crossval_scores_file)', False, 'import os\n'), ((121, 23, 123, 66), 'ppretty.ppretty', 'ppretty', (), '', False, 'from ppretty import ppretty\n'), ((128, 24, 129, 66), 'ppretty.ppretty', 'ppretty', (), '', False, 'from ppretty import ppretty\n'), ((80, 24, 80, 47), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((94, 49, 94, 62), 'json.load', 'json.load', ({(94, 59, 94, 61): 'sf'}, {}), '(sf)', False, 'import json\n')] |
thisisishara/test_pypi_cli | testjpkg/jsonify/hij.py | 15b22ed8943a18a6d9de9ee4ba6a84249a633e2e | print("hiiiiiiiiiiiiiiiix")
def sayhi():
print("2nd pkg said hi")
| [] |
raspbian-packages/pandas | asv_bench/benchmarks/algorithms.py | fb33806b5286deb327b2e0fa96aedf25a6ed563f | import numpy as np
import pandas as pd
from pandas.util import testing as tm
class algorithm(object):
goal_time = 0.2
def setup(self):
N = 100000
self.int_unique = pd.Int64Index(np.arange(N * 5))
# cache is_unique
self.int_unique.is_unique
self.int = pd.Int64Index(np.arange(N).repeat(5))
self.float = pd.Float64Index(np.random.randn(N).repeat(5))
# Convenience naming.
self.checked_add = pd.core.nanops._checked_add_with_arr
self.arr = np.arange(1000000)
self.arrpos = np.arange(1000000)
self.arrneg = np.arange(-1000000, 0)
self.arrmixed = np.array([1, -1]).repeat(500000)
def time_int_factorize(self):
self.int.factorize()
def time_float_factorize(self):
        self.float.factorize()
def time_int_unique_duplicated(self):
self.int_unique.duplicated()
def time_int_duplicated(self):
self.int.duplicated()
def time_float_duplicated(self):
self.float.duplicated()
def time_add_overflow_pos_scalar(self):
self.checked_add(self.arr, 1)
def time_add_overflow_neg_scalar(self):
self.checked_add(self.arr, -1)
def time_add_overflow_zero_scalar(self):
self.checked_add(self.arr, 0)
def time_add_overflow_pos_arr(self):
self.checked_add(self.arr, self.arrpos)
def time_add_overflow_neg_arr(self):
self.checked_add(self.arr, self.arrneg)
def time_add_overflow_mixed_arr(self):
self.checked_add(self.arr, self.arrmixed)
class hashing(object):
goal_time = 0.2
def setup(self):
N = 100000
self.df = pd.DataFrame(
{'A': pd.Series(tm.makeStringIndex(100).take(
np.random.randint(0, 100, size=N))),
'B': pd.Series(tm.makeStringIndex(10000).take(
np.random.randint(0, 10000, size=N))),
'D': np.random.randn(N),
'E': np.arange(N),
'F': pd.date_range('20110101', freq='s', periods=N),
'G': pd.timedelta_range('1 day', freq='s', periods=N),
})
self.df['C'] = self.df['B'].astype('category')
self.df.iloc[10:20] = np.nan
def time_frame(self):
self.df.hash()
def time_series_int(self):
self.df.E.hash()
def time_series_string(self):
self.df.B.hash()
def time_series_categorical(self):
self.df.C.hash()
| [((22, 19, 22, 37), 'numpy.arange', 'np.arange', ({(22, 29, 22, 36): '1000000'}, {}), '(1000000)', True, 'import numpy as np\n'), ((23, 22, 23, 40), 'numpy.arange', 'np.arange', ({(23, 32, 23, 39): '1000000'}, {}), '(1000000)', True, 'import numpy as np\n'), ((24, 22, 24, 44), 'numpy.arange', 'np.arange', ({(24, 32, 24, 40): '-1000000', (24, 42, 24, 43): '0'}, {}), '(-1000000, 0)', True, 'import numpy as np\n'), ((12, 40, 12, 56), 'numpy.arange', 'np.arange', ({(12, 50, 12, 55): 'N * 5'}, {}), '(N * 5)', True, 'import numpy as np\n'), ((25, 24, 25, 41), 'numpy.array', 'np.array', ({(25, 33, 25, 40): '[1, -1]'}, {}), '([1, -1])', True, 'import numpy as np\n'), ((72, 18, 72, 36), 'numpy.random.randn', 'np.random.randn', ({(72, 34, 72, 35): 'N'}, {}), '(N)', True, 'import numpy as np\n'), ((73, 18, 73, 30), 'numpy.arange', 'np.arange', ({(73, 28, 73, 29): 'N'}, {}), '(N)', True, 'import numpy as np\n'), ((74, 18, 74, 64), 'pandas.date_range', 'pd.date_range', (), '', True, 'import pandas as pd\n'), ((75, 18, 75, 66), 'pandas.timedelta_range', 'pd.timedelta_range', (), '', True, 'import pandas as pd\n'), ((16, 33, 16, 45), 'numpy.arange', 'np.arange', ({(16, 43, 16, 44): 'N'}, {}), '(N)', True, 'import numpy as np\n'), ((17, 37, 17, 55), 'numpy.random.randn', 'np.random.randn', ({(17, 53, 17, 54): 'N'}, {}), '(N)', True, 'import numpy as np\n'), ((69, 16, 69, 49), 'numpy.random.randint', 'np.random.randint', (), '', True, 'import numpy as np\n'), ((71, 17, 71, 52), 'numpy.random.randint', 'np.random.randint', (), '', True, 'import numpy as np\n'), ((68, 28, 68, 51), 'pandas.util.testing.makeStringIndex', 'tm.makeStringIndex', ({(68, 47, 68, 50): '100'}, {}), '(100)', True, 'from pandas.util import testing as tm\n'), ((70, 28, 70, 53), 'pandas.util.testing.makeStringIndex', 'tm.makeStringIndex', ({(70, 47, 70, 52): '10000'}, {}), '(10000)', True, 'from pandas.util import testing as tm\n')] |
lh-astro/RM-Tools | RMtools_1D/do_RMsynth_1D.py | ac64cc41b2f696f21ee7dd001303cbad1ff71114 | #!/usr/bin/env python
#=============================================================================#
# #
# NAME: do_RMsynth_1D.py #
# #
# PURPOSE: API for runnning RM-synthesis on an ASCII Stokes I, Q & U spectrum.#
# #
# MODIFIED: 16-Nov-2018 by J. West #
# MODIFIED: 23-October-2019 by A. Thomson #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 - 2018 Cormac R. Purcell #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
import sys
import os
import time
import traceback
import json
import math as m
import numpy as np
import matplotlib.pyplot as plt
from RMutils.util_RM import do_rmsynth
from RMutils.util_RM import do_rmsynth_planes
from RMutils.util_RM import get_rmsf_planes
from RMutils.util_RM import measure_FDF_parms
from RMutils.util_RM import measure_qu_complexity
from RMutils.util_RM import measure_fdf_complexity
from RMutils.util_misc import nanmedian
from RMutils.util_misc import toscalar
from RMutils.util_misc import create_frac_spectra
from RMutils.util_misc import poly5
from RMutils.util_misc import MAD
from RMutils.util_plotTk import plot_Ipqu_spectra_fig
from RMutils.util_plotTk import plot_rmsf_fdf_fig
from RMutils.util_plotTk import plot_complexity_fig
from RMutils.util_plotTk import CustomNavbar
from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax
if sys.version_info.major == 2:
print('RM-tools will no longer run with Python 2! Please use Python 3.')
exit()
C = 2.997924538e8 # Speed of light [m/s]
#-----------------------------------------------------------------------------#
def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None,
nSamples=10.0, weightType="variance", fitRMSF=False,
noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False,
debug=False, verbose=False, log=print,units='Jy/beam', prefixOut="prefixOut", args=None):
"""Run RM synthesis on 1D data.
Args:
data (list): Contains frequency and polarization data as either:
[freq_Hz, I, Q, U, dI, dQ, dU]
freq_Hz (array_like): Frequency of each channel in Hz.
I (array_like): Stokes I intensity in each channel.
Q (array_like): Stokes Q intensity in each channel.
U (array_like): Stokes U intensity in each channel.
dI (array_like): Error in Stokes I intensity in each channel.
dQ (array_like): Error in Stokes Q intensity in each channel.
dU (array_like): Error in Stokes U intensity in each channel.
or
[freq_Hz, q, u, dq, du]
freq_Hz (array_like): Frequency of each channel in Hz.
q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
u (array_like): Fractional Stokes U intensity (U/I) in each channel.
dq (array_like): Error in fractional Stokes Q intensity in each channel.
du (array_like): Error in fractional Stokes U intensity in each channel.
Kwargs:
polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
nSamples (float): Number of samples across the RMSF.
weightType (str): Can be "variance" or "uniform"
"variance" -- Weight by uncertainty in Q and U.
"uniform" -- Weight uniformly (i.e. with 1s)
fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): True if no Stokes I data is provided.
phiNoise_radm2 (float): ????
nBits (int): Precision of floating point numbers.
showPlots (bool): Show plots?
debug (bool): Turn on debugging messages & plots?
verbose (bool): Verbosity.
log (function): Which logging function to use.
units (str): Units of data.
Returns:
mDict (dict): Summary of RM synthesis results.
aDict (dict): Data output by RM synthesis.
"""
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
if verbose: log("... success.")
except Exception:
if verbose: log("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
if verbose: log("... success.")
noStokesI = True
except Exception:
if verbose: log("...failed.")
if debug:
log(traceback.format_exc())
sys.exit()
if verbose: log("Successfully read in the Stokes spectra.")
# If no Stokes I present, create a dummy spectrum = unity
if noStokesI:
if verbose: log("Warn: no Stokes I data in use.")
IArr = np.ones_like(QArr)
dIArr = np.zeros_like(QArr)
# Convert to GHz for convenience
freqArr_GHz = freqArr_Hz / 1e9
dQUArr = (dQArr + dUArr)/2.0
# Fit the Stokes I spectrum and create the fractional spectra
IModArr, qArr, uArr, dqArr, duArr, fitDict = \
create_frac_spectra(freqArr = freqArr_GHz,
IArr = IArr,
QArr = QArr,
UArr = UArr,
dIArr = dIArr,
dQArr = dQArr,
dUArr = dUArr,
polyOrd = polyOrd,
verbose = True,
debug = debug)
# Plot the data and the Stokes I model fit
if verbose: log("Plotting the input data and spectral index fit.")
freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz/1e9)
specFig = plt.figure(figsize=(12.0, 8))
plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz,
IArr = IArr,
qArr = qArr,
uArr = uArr,
dIArr = dIArr,
dqArr = dqArr,
duArr = duArr,
freqHirArr_Hz = freqHirArr_Hz,
IModArr = IModHirArr,
fig = specFig,
units = units)
# Use the custom navigation toolbar (does not work on Mac OS X)
# try:
# specFig.canvas.toolbar.pack_forget()
# CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# if not plt.isinteractive():
# specFig.show()
# DEBUG (plot the Q, U and average RMS spectrum)
if debug:
rmsFig = plt.figure(figsize=(12.0, 8))
ax = rmsFig.add_subplot(111)
ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5,
label='rms <QU>')
ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5,
label='rms Q')
ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5,
label='rms U')
xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
np.max(freqArr_Hz)/1e9 + xRange*0.05)
ax.set_xlabel('$\\nu$ (GHz)')
ax.set_ylabel('RMS '+units)
ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
# rmsFig.show()
#-------------------------------------------------------------------------#
# Calculate some wavelength parameters
lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) -
np.nanmin(lambdaSqArr_m2) )
dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))
# Set the Faraday depth range
fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
if dPhi_radm2 is None:
dPhi_radm2 = fwhmRMSF_radm2 / nSamples
if phiMax_radm2 is None:
phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM
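    # Worked example of the sampling above (illustrative numbers only): a
    # 1.1-1.4 GHz band spans lambda^2 ~ 0.046-0.074 m^2, so
    # fwhmRMSF ~ 2*sqrt(3)/0.028 ~ 120 rad/m^2 and, with nSamples = 10,
    # dPhi ~ 12 rad/m^2.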
# Faraday depth sampling. Zero always centred on middle channel
nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0
stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0
phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
phiArr_radm2 = phiArr_radm2.astype(dtFloat)
if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." % (phiArr_radm2[0],
phiArr_radm2[-1],
float(dPhi_radm2),
nChanRM))
# Calculate the weighting as 1/sigma^2 or all 1s (uniform)
if weightType=="variance":
weightArr = 1.0 / np.power(dQUArr, 2.0)
else:
weightType = "uniform"
weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
if verbose: log("Weight type is '%s'." % weightType)
startTime = time.time()
# Perform RM-synthesis on the spectrum
dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr,
dataU = uArr,
lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
nBits = nBits,
verbose = verbose,
log = log)
# Calculate the Rotation Measure Spread Function
RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
mskArr = ~np.isfinite(qArr),
lam0Sq_m2 = lam0Sq_m2,
double = True,
fitRMSF = fitRMSF,
fitRMSFreal = False,
nBits = nBits,
verbose = verbose,
log = log)
fwhmRMSF = float(fwhmRMSFArr)
# ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#
#dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
# do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)
#-------------------------------------------------------------------------#
endTime = time.time()
cputime = (endTime - startTime)
if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)
# Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
# Multiply the dirty FDF by Ifreq0 to recover the PI
freq0_Hz = C / m.sqrt(lam0Sq_m2)
Ifreq0 = poly5(fitDict["p"])(freq0_Hz/1e9)
dirtyFDF *= (Ifreq0) # FDF is in fracpol units initially, convert back to flux
    # Calculate the theoretical noise in the FDF (NB: this formula is only strictly valid for variance weights)
weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 )
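    # (This is ordinary error propagation for a weighted mean:
    #  sigma_FDF = sqrt(sum(w_i^2 * sigma_i^2)) / sum(w_i),
    #  which reduces to sigma/sqrt(N) for uniform weights and equal sigma_i.)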
# Measure the parameters of the dirty FDF
# Use the theoretical noise to calculate uncertainties
mDict = measure_FDF_parms(FDF = dirtyFDF,
phiArr = phiArr_radm2,
fwhmRMSF = fwhmRMSF,
dFDF = dFDFth,
lamSqArr_m2 = lambdaSqArr_m2,
lam0Sq = lam0Sq_m2)
mDict["Ifreq0"] = toscalar(Ifreq0)
mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
mDict["IfitStat"] = fitDict["fitStatus"]
mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
mDict["freq0_Hz"] = toscalar(freq0_Hz)
mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
mDict["dQU"] = toscalar(nanmedian(dQUArr))
mDict["dFDFth"] = toscalar(dFDFth)
mDict["units"] = units
if fitDict["fitStatus"] >= 128:
log("WARNING: Stokes I model contains negative values!")
elif fitDict["fitStatus"] >= 64:
log("Caution: Stokes I model has low signal-to-noise.")
    # Add information on the nature of the channels:
good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0]
mDict["min_freq"]=float(np.min(freqArr_Hz[good_channels]))
mDict["max_freq"]=float(np.max(freqArr_Hz[good_channels]))
mDict["N_channels"]=good_channels.size
mDict["median_channel_width"]=float(np.median(np.diff(freqArr_Hz)))
# Measure the complexity of the q and u spectra
mDict["fracPol"] = mDict["ampPeakPIfit"]/(Ifreq0)
mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
fracPol = mDict["fracPol"],
psi0_deg = mDict["polAngle0Fit_deg"],
RM_radm2 = mDict["phiPeakPIfit_rm2"])
mDict.update(mD)
# Debugging plots for spectral complexity measure
if debug:
tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
qArr=pD["yArrQ"],
dqArr=pD["dyArrQ"],
sigmaAddqArr=pD["sigmaAddArrQ"],
chiSqRedqArr=pD["chiSqRedArrQ"],
probqArr=pD["probArrQ"],
uArr=pD["yArrU"],
duArr=pD["dyArrU"],
sigmaAdduArr=pD["sigmaAddArrU"],
chiSqReduArr=pD["chiSqRedArrU"],
probuArr=pD["probArrU"],
mDict=mDict)
if saveOutput:
if verbose: print("Saving debug plots:")
outFilePlot = prefixOut + ".debug-plots.pdf"
if verbose: print("> " + outFilePlot)
tmpFig.savefig(outFilePlot, bbox_inches = 'tight')
else:
tmpFig.show()
    # Add the array dictionary
aDict = dict()
aDict["phiArr_radm2"] = phiArr_radm2
aDict["phi2Arr_radm2"] = phi2Arr_radm2
aDict["RMSFArr"] = RMSFArr
aDict["freqArr_Hz"] = freqArr_Hz
aDict["weightArr"]=weightArr
aDict["dirtyFDF"]=dirtyFDF
if verbose:
# Print the results to the screen
log()
log('-'*80)
log('RESULTS:\n')
log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))
log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
mDict["dPolAngleFit_deg"]))
log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
mDict["dPolAngle0Fit_deg"]))
log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
mDict["dPhiPeakPIfit_rm2"]))
log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9))
log('I freq0 = %.4g %s' % (mDict["Ifreq0"],units))
log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"],
mDict["dAmpPeakPIfit"],units))
log('QU Noise = %.4g %s' % (mDict["dQU"],units))
log('FDF Noise (theory) = %.4g %s' % (mDict["dFDFth"],units))
log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"],units))
log('FDF Noise (rms) = %.4g %s' % (mDict["dFDFrms"],units))
log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"],
mDict["dSigmaAddPlusQ"],
mDict["dSigmaAddMinusQ"]))
log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"],
mDict["dSigmaAddPlusU"],
mDict["dSigmaAddMinusU"]))
log()
log('-'*80)
# Plot the RM Spread Function and dirty FDF
if showPlots or saveOutput:
fdfFig = plt.figure(figsize=(12.0, 8))
plot_rmsf_fdf_fig(phiArr = phiArr_radm2,
FDF = dirtyFDF,
phi2Arr = phi2Arr_radm2,
RMSFArr = RMSFArr,
fwhmRMSF = fwhmRMSF,
vLine = mDict["phiPeakPIfit_rm2"],
fig = fdfFig,
units = units)
# Use the custom navigation toolbar
# try:
# fdfFig.canvas.toolbar.pack_forget()
# CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# fdfFig.show()
# Pause if plotting enabled
if showPlots:
plt.show()
elif saveOutput or debug:
if verbose: print("Saving RMSF and dirty FDF plot:")
outFilePlot = prefixOut + ".RMSF-dirtyFDF-plots.pdf"
if verbose: print("> " + outFilePlot)
fdfFig.savefig(outFilePlot, bbox_inches = 'tight')
# #if verbose: print "Press <RETURN> to exit ...",
# input()
return mDict, aDict
def readFile(dataFile, nBits, verbose=True, debug=False):
"""
Read the I, Q & U data from the ASCII file.
Inputs:
datafile (str): relative or absolute path to file.
nBits (int): number of bits to store the data as.
verbose (bool): Print verbose messages to terminal?
debug (bool): Print full traceback in case of failure?
Returns:
data (list of arrays): List containing the columns found in the file.
If Stokes I is present, this will be [freq_Hz, I, Q, U, dI, dQ, dU],
else [freq_Hz, q, u, dq, du].
"""
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# Output prefix is derived from the input file name
# Read the data-file. Format=space-delimited, comments="#".
if verbose: print("Reading the data file '%s':" % dataFile)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: print("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr,
dIArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr]
except Exception:
if verbose: print("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: print("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, QArr, UArr, dQArr, dUArr]
noStokesI = True
except Exception:
if verbose: print("...failed.")
if debug:
print(traceback.format_exc())
sys.exit()
if verbose: print("Successfully read in the Stokes spectra.")
return data
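# Illustrative call of readFile (the file name below is an example only):
#   data = readFile("spectrum.dat", nBits=32)
#   if len(data) == 7:   # freq_Hz, I, Q, U, dI, dQ, dU
#       freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr = data
#   else:                # freq_Hz, q, u, dq, du
#       freqArr_Hz, qArr, uArr, dqArr, duArr = data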
def saveOutput(outdict, arrdict, prefixOut, verbose):
# Save the dirty FDF, RMSF and weight array to ASCII files
if verbose: print("Saving the dirty FDF, RMSF weight arrays to ASCII files.")
outFile = prefixOut + "_FDFdirty.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phiArr_radm2"], arrdict["dirtyFDF"].real, arrdict["dirtyFDF"].imag)))
outFile = prefixOut + "_RMSF.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phi2Arr_radm2"], arrdict["RMSFArr"].real, arrdict["RMSFArr"].imag)))
outFile = prefixOut + "_weight.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["freqArr_Hz"], arrdict["weightArr"])))
# Save the measurements to a "key=value" text file
outFile = prefixOut + "_RMsynth.dat"
if verbose:
print("Saving the measurements on the FDF in 'key=val' and JSON formats.")
print("> %s" % outFile)
FH = open(outFile, "w")
for k, v in outdict.items():
FH.write("%s=%s\n" % (k, v))
FH.close()
outFile = prefixOut + "_RMsynth.json"
if verbose:
print("> %s" % outFile)
json.dump(dict(outdict), open(outFile, "w"))
#-----------------------------------------------------------------------------#
def main():
import argparse
"""
Start the function to perform RM-synthesis if called from the command line.
"""
# Help string to be shown using the -h option
descStr = """
Run RM-synthesis on Stokes I, Q and U spectra (1D) stored in an ASCII
file. The Stokes I spectrum is first fit with a polynomial and the
resulting model used to create fractional q = Q/I and u = U/I spectra.
    The ASCII file should contain the following columns, in a space-separated format:
[freq_Hz, I, Q, U, I_err, Q_err, U_err]
OR
[freq_Hz, Q, U, Q_err, U_err]
To get outputs, one or more of the following flags must be set: -S, -p, -v.
"""
epilog_text="""
Outputs with -S flag:
_FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U]
_RMSF.dat: Computed RMSF [Phi, Q, U]
_RMsynth.dat: list of derived parameters for RM spectrum
(approximately equivalent to -v flag output)
_RMsynth.json: dictionary of derived parameters for RM spectrum
_weight.dat: Calculated channel weights [freq_Hz, weight]
"""
# Parse the command line options
parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("dataFile", metavar="dataFile.dat", nargs=1,
help="ASCII file containing Stokes spectra & errors.")
parser.add_argument("-t", dest="fitRMSF", action="store_true",
help="fit a Gaussian to the RMSF [False]")
parser.add_argument("-l", dest="phiMax_radm2", type=float, default=None,
help="absolute max Faraday depth sampled [Auto].")
parser.add_argument("-d", dest="dPhi_radm2", type=float, default=None,
help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)")
parser.add_argument("-s", dest="nSamples", type=float, default=10,
help="number of samples across the RMSF lobe [10].")
parser.add_argument("-w", dest="weightType", default="variance",
help="weighting [inverse variance] or 'uniform' (all 1s).")
parser.add_argument("-o", dest="polyOrd", type=int, default=2,
help="polynomial order to fit to I spectrum [2].")
parser.add_argument("-i", dest="noStokesI", action="store_true",
help="ignore the Stokes I spectrum [False].")
parser.add_argument("-b", dest="bit64", action="store_true",
help="use 64-bit floating point precision [False (uses 32-bit)]")
parser.add_argument("-p", dest="showPlots", action="store_true",
help="show the plots [False].")
parser.add_argument("-v", dest="verbose", action="store_true",
help="verbose output [False].")
parser.add_argument("-S", dest="saveOutput", action="store_true",
help="save the arrays and plots [False].")
parser.add_argument("-D", dest="debug", action="store_true",
help="turn on debugging messages & plots [False].")
parser.add_argument("-U", dest="units", type=str, default="Jy/beam",
help="Intensity units of the data. [Jy/beam]")
args = parser.parse_args()
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
dataDir, dummy = os.path.split(args.dataFile[0])
# Set the floating point precision
nBits = 32
if args.bit64:
nBits = 64
verbose=args.verbose
data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug)
# Run RM-synthesis on the spectra
mDict, aDict = run_rmsynth(data = data,
polyOrd = args.polyOrd,
phiMax_radm2 = args.phiMax_radm2,
dPhi_radm2 = args.dPhi_radm2,
nSamples = args.nSamples,
weightType = args.weightType,
fitRMSF = args.fitRMSF,
noStokesI = args.noStokesI,
nBits = nBits,
showPlots = args.showPlots,
debug = args.debug,
verbose = verbose,
units = args.units,
prefixOut = prefixOut,
args = args,
)
if args.saveOutput:
saveOutput(mDict, aDict, prefixOut, verbose)
#-----------------------------------------------------------------------------#
if __name__ == "__main__":
main()
| [((121, 21, 121, 55), 'os.path.splitext', 'os.path.splitext', ({(121, 38, 121, 54): 'args.dataFile[0]'}, {}), '(args.dataFile[0])', False, 'import os\n'), ((159, 13, 168, 50), 'RMutils.util_misc.create_frac_spectra', 'create_frac_spectra', (), '', False, 'from RMutils.util_misc import create_frac_spectra\n'), ((172, 21, 172, 70), 'numpy.linspace', 'np.linspace', ({(172, 33, 172, 46): 'freqArr_Hz[0]', (172, 48, 172, 62): 'freqArr_Hz[-1]', (172, 64, 172, 69): '10000'}, {}), '(freqArr_Hz[0], freqArr_Hz[-1], 10000)', True, 'import numpy as np\n'), ((174, 14, 174, 43), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((175, 4, 185, 49), 'RMutils.util_plotTk.plot_Ipqu_spectra_fig', 'plot_Ipqu_spectra_fig', (), '', False, 'from RMutils.util_plotTk import plot_Ipqu_spectra_fig\n'), ((219, 21, 219, 48), 'numpy.power', 'np.power', ({(219, 30, 219, 42): 'C / freqArr_Hz', (219, 44, 219, 47): '2.0'}, {}), '(C / freqArr_Hz, 2.0)', True, 'import numpy as np\n'), ((238, 19, 238, 70), 'numpy.linspace', 'np.linspace', ({(238, 31, 238, 45): 'startPhi_radm2', (238, 47, 238, 60): 'stopPhi_radm2', (238, 62, 238, 69): 'nChanRM'}, {}), '(startPhi_radm2, stopPhi_radm2, nChanRM)', True, 'import numpy as np\n'), ((253, 16, 253, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((256, 26, 263, 66), 'RMutils.util_RM.do_rmsynth_planes', 'do_rmsynth_planes', (), '', False, 'from RMutils.util_RM import do_rmsynth_planes\n'), ((287, 14, 287, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((304, 12, 309, 54), 'RMutils.util_RM.measure_FDF_parms', 'measure_FDF_parms', (), '', False, 'from RMutils.util_RM import measure_FDF_parms\n'), ((310, 22, 310, 38), 'RMutils.util_misc.toscalar', 'toscalar', ({(310, 31, 310, 37): 'Ifreq0'}, {}), '(Ifreq0)', False, 'from RMutils.util_misc import toscalar\n'), ((314, 25, 314, 44), 'RMutils.util_misc.toscalar', 'toscalar', ({(314, 34, 314, 43): 'lam0Sq_m2'}, {}), '(lam0Sq_m2)', False, 'from RMutils.util_misc import toscalar\n'), ((315, 24, 315, 42), 'RMutils.util_misc.toscalar', 'toscalar', ({(315, 33, 315, 41): 'freq0_Hz'}, {}), '(freq0_Hz)', False, 'from RMutils.util_misc import toscalar\n'), ((316, 24, 316, 42), 'RMutils.util_misc.toscalar', 'toscalar', ({(316, 33, 316, 41): 'fwhmRMSF'}, {}), '(fwhmRMSF)', False, 'from RMutils.util_misc import toscalar\n'), ((318, 22, 318, 38), 'RMutils.util_misc.toscalar', 'toscalar', ({(318, 31, 318, 37): 'dFDFth'}, {}), '(dFDFth)', False, 'from RMutils.util_misc import toscalar\n'), ((337, 13, 344, 74), 'RMutils.util_RM.measure_qu_complexity', 'measure_qu_complexity', (), '', False, 'from RMutils.util_RM import measure_qu_complexity\n'), ((569, 13, 570, 79), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((605, 21, 605, 55), 'os.path.splitext', 'os.path.splitext', ({(605, 38, 605, 54): 'args.dataFile[0]'}, {}), '(args.dataFile[0])', False, 'import os\n'), ((606, 21, 606, 52), 'os.path.split', 'os.path.split', ({(606, 35, 606, 51): 'args.dataFile[0]'}, {}), '(args.dataFile[0])', False, 'import os\n'), ((118, 11, 118, 43), 'os.path.exists', 'os.path.exists', ({(118, 26, 118, 42): 'args.dataFile[0]'}, {}), '(args.dataFile[0])', False, 'import os\n'), ((120, 8, 120, 18), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((150, 15, 150, 33), 'numpy.ones_like', 'np.ones_like', ({(150, 28, 150, 32): 'QArr'}, {}), '(QArr)', True, 'import numpy as np\n'), ((151, 16, 151, 35), 'numpy.zeros_like', 
'np.zeros_like', ({(151, 30, 151, 34): 'QArr'}, {}), '(QArr)', True, 'import numpy as np\n'), ((173, 17, 173, 36), 'RMutils.util_misc.poly5', 'poly5', ({(173, 23, 173, 35): "fitDict['p']"}, {}), "(fitDict['p'])", False, 'from RMutils.util_misc import poly5\n'), ((200, 17, 200, 46), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((221, 25, 221, 50), 'numpy.nanmax', 'np.nanmax', ({(221, 35, 221, 49): 'lambdaSqArr_m2'}, {}), '(lambdaSqArr_m2)', True, 'import numpy as np\n'), ((222, 25, 222, 50), 'numpy.nanmin', 'np.nanmin', ({(222, 35, 222, 49): 'lambdaSqArr_m2'}, {}), '(lambdaSqArr_m2)', True, 'import numpy as np\n'), ((250, 20, 250, 60), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((293, 19, 293, 36), 'math.sqrt', 'm.sqrt', ({(293, 26, 293, 35): 'lam0Sq_m2'}, {}), '(lam0Sq_m2)', True, 'import math as m\n'), ((294, 13, 294, 32), 'RMutils.util_misc.poly5', 'poly5', ({(294, 19, 294, 31): "fitDict['p']"}, {}), "(fitDict['p'])", False, 'from RMutils.util_misc import poly5\n'), ((298, 25, 298, 44), 'numpy.isnan', 'np.isnan', ({(298, 34, 298, 43): 'weightArr'}, {}), '(weightArr)', True, 'import numpy as np\n'), ((317, 28, 317, 45), 'RMutils.util_misc.nanmedian', 'nanmedian', ({(317, 38, 317, 44): 'dQUArr'}, {}), '(dQUArr)', False, 'from RMutils.util_misc import nanmedian\n'), ((330, 28, 330, 61), 'numpy.min', 'np.min', ({(330, 35, 330, 60): 'freqArr_Hz[good_channels]'}, {}), '(freqArr_Hz[good_channels])', True, 'import numpy as np\n'), ((331, 28, 331, 61), 'numpy.max', 'np.max', ({(331, 35, 331, 60): 'freqArr_Hz[good_channels]'}, {}), '(freqArr_Hz[good_channels])', True, 'import numpy as np\n'), ((349, 17, 360, 49), 'RMutils.util_plotTk.plot_complexity_fig', 'plot_complexity_fig', (), '', False, 'from RMutils.util_plotTk import plot_complexity_fig\n'), ((413, 17, 413, 46), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((414, 8, 421, 45), 'RMutils.util_plotTk.plot_rmsf_fdf_fig', 'plot_rmsf_fdf_fig', (), '', False, 'from RMutils.util_plotTk import plot_rmsf_fdf_fig\n'), ((435, 8, 435, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((476, 9, 476, 57), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((602, 11, 602, 43), 'os.path.exists', 'os.path.exists', ({(602, 26, 602, 42): 'args.dataFile[0]'}, {}), '(args.dataFile[0])', False, 'import os\n'), ((604, 8, 604, 18), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((220, 32, 220, 51), 'numpy.diff', 'np.diff', ({(220, 40, 220, 50): 'freqArr_Hz'}, {}), '(freqArr_Hz)', True, 'import numpy as np\n'), ((223, 39, 223, 62), 'numpy.diff', 'np.diff', ({(223, 47, 223, 61): 'lambdaSqArr_m2'}, {}), '(lambdaSqArr_m2)', True, 'import numpy as np\n'), ((224, 39, 224, 62), 'numpy.diff', 'np.diff', ({(224, 47, 224, 61): 'lambdaSqArr_m2'}, {}), '(lambdaSqArr_m2)', True, 'import numpy as np\n'), ((227, 27, 227, 38), 'math.sqrt', 'm.sqrt', ({(227, 34, 227, 37): '(3.0)'}, {}), '(3.0)', True, 'import math as m\n'), ((231, 23, 231, 34), 'math.sqrt', 'm.sqrt', ({(231, 30, 231, 33): '(3.0)'}, {}), '(3.0)', True, 'import math as m\n'), ((247, 26, 247, 47), 'numpy.power', 'np.power', ({(247, 35, 247, 41): 'dQUArr', (247, 43, 247, 46): '(2.0)'}, {}), '(dQUArr, 2.0)', True, 'import numpy as np\n'), ((333, 50, 333, 69), 'numpy.diff', 'np.diff', ({(333, 58, 333, 68): 'freqArr_Hz'}, {}), '(freqArr_Hz)', True, 'import numpy as np\n'), ((208, 18, 208, 39), 'numpy.nanmax', 
'np.nanmax', ({(208, 28, 208, 38): 'freqArr_Hz'}, {}), '(freqArr_Hz)', True, 'import numpy as np\n'), ((208, 40, 208, 61), 'numpy.nanmin', 'np.nanmin', ({(208, 50, 208, 60): 'freqArr_Hz'}, {}), '(freqArr_Hz)', True, 'import numpy as np\n'), ((270, 43, 270, 60), 'numpy.isfinite', 'np.isfinite', ({(270, 55, 270, 59): 'qArr'}, {}), '(qArr)', True, 'import numpy as np\n'), ((299, 73, 299, 90), 'numpy.sum', 'np.sum', ({(299, 80, 299, 89): 'weightArr'}, {}), '(weightArr)', True, 'import numpy as np\n'), ((329, 57, 329, 74), 'numpy.isfinite', 'np.isfinite', ({(329, 69, 329, 73): 'qArr'}, {}), '(qArr)', True, 'import numpy as np\n'), ((485, 25, 485, 73), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((144, 12, 144, 22), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((209, 21, 209, 39), 'numpy.min', 'np.min', ({(209, 28, 209, 38): 'freqArr_Hz'}, {}), '(freqArr_Hz)', True, 'import numpy as np\n'), ((210, 21, 210, 39), 'numpy.max', 'np.max', ({(210, 28, 210, 38): 'freqArr_Hz'}, {}), '(freqArr_Hz)', True, 'import numpy as np\n'), ((494, 12, 494, 22), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((299, 44, 299, 65), 'numpy.nan_to_num', 'np.nan_to_num', ({(299, 58, 299, 64): 'dQUArr'}, {}), '(dQUArr)', True, 'import numpy as np\n'), ((143, 20, 143, 42), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((493, 22, 493, 44), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n')] |
awesome-archive/cogdl | cogdl/modules/conv/__init__.py | 0a354eaaaf851e7218197508e7e85a81d3fb5753 | from .message_passing import MessagePassing
from .gcn_conv import GCNConv
from .gat_conv import GATConv
from .se_layer import SELayer
from .aggregator import Meanaggregator
from .maggregator import meanaggr
__all__ = [
'MessagePassing',
'GCNConv',
'GATConv',
'SELayer',
'Meanaggregator'
]
| [] |
gpescia/MyNetKet | netket/utils/jax.py | 958510966a5870d9d491de0628903cf1fc210921 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from . import struct
def get_afun_if_module(mod_or_fun) -> Callable:
"""Returns the apply function if it's a module. Does nothing otherwise."""
if hasattr(mod_or_fun, "apply"):
return mod_or_fun.apply
else:
return mod_or_fun
@struct.dataclass
class WrappedApplyFun:
"""Wraps a callable to be a module-like object with the method `apply`."""
apply: Callable
"""The wrapped callable."""
def __repr__(self):
return f"{type(self).__name__}(apply={self.apply}, hash={hash(self)})"
def wrap_afun(mod_or_fun):
"""Wraps a callable to be a module-like object with the method `apply`.
Does nothing if it already has an apply method.
"""
if hasattr(mod_or_fun, "apply"):
return mod_or_fun
else:
return WrappedApplyFun(mod_or_fun)
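# For example, wrap_afun(lambda params, x: x).apply is the bare callable,
# while an object that already exposes an `apply` method (e.g. a flax Module)
# is returned unchanged.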
| [] |
Kungreye/gee_tools | geetools/batch/featurecollection.py | d0712ac78410250c41503ca08075f536d58d2ef3 | # coding=utf-8
import ee
from . import utils
import json
import csv
from .. import tools
def fromShapefile(filename, crs=None, start=None, end=None):
""" Convert an ESRI file (.shp and .dbf must be present) to a
ee.FeatureCollection
    At the moment this only works for shapes with fewer than 1000 records and
    doesn't handle complex shapes.
    :param filename: the name of the shapefile. If the shapefile is not in the
    same path as the script, specify a full path instead.
    :type filename: str
    :param start: index of the first record to convert (defaults to 0)
:return: the FeatureCollection
:rtype: ee.FeatureCollection
"""
import shapefile
wgs84 = ee.Projection('EPSG:4326')
# read the filename
reader = shapefile.Reader(filename)
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
field_types = [field[1] for field in fields]
types = dict(zip(field_names, field_types))
features = []
projection = utils.getProjection(filename) if not crs else crs
# catch a string with format "EPSG:XXX"
if isinstance(projection, str):
if 'EPSG:' in projection:
projection = projection.split(':')[1]
projection = 'EPSG:{}'.format(projection)
# filter records with start and end
start = start if start else 0
if not end:
records = reader.shapeRecords()
end = len(records)
else:
end = end + 1
if (end-start)>1000:
msg = "Can't process more than 1000 records at a time. Found {}"
raise ValueError(msg.format(end-start))
for i in range(start, end):
# atr = dict(zip(field_names, sr.record))
sr = reader.shapeRecord(i)
atr = {}
for fld, rec in zip(field_names, sr.record):
fld_type = types[fld]
if fld_type == 'D':
value = ee.Date(rec.isoformat()).millis().getInfo()
elif fld_type in ['C', 'N', 'F']:
value = rec
else:
continue
atr[fld] = value
geom = sr.shape.__geo_interface__
if projection is not None:
geometry = ee.Geometry(geom, projection) \
.transform(wgs84, 1)
else:
geometry = ee.Geometry(geom)
feat = ee.Feature(geometry, atr)
features.append(feat)
return ee.FeatureCollection(features)
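# Illustrative use (the path is an example only; shapes with more than 1000
# records must be processed in chunks via the start/end arguments):
#   fc = fromShapefile("fields.shp", start=0, end=500)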
def fromGeoJSON(filename=None, data=None, crs=None):
""" Create a list of Features from a GeoJSON file. Return a python tuple
with ee.Feature inside. This is due to failing when attempting to create a
FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
it yourself casting the result of this function to a ee.List or using it
directly as a FeatureCollection argument.
:param filename: the name of the file to load
:type filename: str
:param crs: a coordinate reference system in EPSG format. If not specified
it will try to get it from the geoJSON, and if not there it will rise
an error
:type: crs: str
:return: a tuple of features.
"""
if filename:
with open(filename, 'r') as geoj:
content = geoj.read()
geodict = json.loads(content)
else:
geodict = data
features = []
# Get crs from GeoJSON
if not crs:
filecrs = geodict.get('crs')
if filecrs:
name = filecrs.get('properties').get('name')
splitcrs = name.split(':')
cleancrs = [part for part in splitcrs if part]
try:
if cleancrs[-1] == 'CRS84':
crs = 'EPSG:4326'
elif cleancrs[-2] == 'EPSG':
crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1])
else:
raise ValueError('{} not recognized'.format(name))
except IndexError:
raise ValueError('{} not recognized'.format(name))
else:
crs = 'EPSG:4326'
for n, feat in enumerate(geodict.get('features')):
properties = feat.get('properties')
geom = feat.get('geometry')
ty = geom.get('type')
coords = geom.get('coordinates')
if ty == 'GeometryCollection':
ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs)
else:
if ty == 'Polygon':
coords = utils.removeZ(coords) if utils.hasZ(coords) else coords
ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs))
ee_feat = ee.feature.Feature(ee_geom, properties)
features.append(ee_feat)
return tuple(features)
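# A minimal sketch of how the returned tuple can be used (the file name is
# illustrative and an initialised Earth Engine session is assumed):
#   feats = fromGeoJSON("boundaries.geojson")
#   fc = ee.FeatureCollection(list(feats))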
def fromKML(filename=None, data=None, crs=None, encoding=None):
""" Create a list of Features from a KML file. Return a python tuple
with ee.Feature inside. This is due to failing when attempting to create a
FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
it yourself casting the result of this function to a ee.List or using it
directly as a FeatureCollection argument.
:param filename: the name of the file to load
:type filename: str
:param crs: a coordinate reference system in EPSG format. If not specified
it will try to get it from the geoJSON, and if not there it will rise
an error
:type: crs: str
:return: a tuple of features.
"""
geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding)
features = geojsondict['features']
for feat in features:
# remove styleUrl
prop = feat['properties']
if 'styleUrl' in prop:
prop.pop('styleUrl')
# remove Z value if needed
geom = feat['geometry']
ty = geom['type']
if ty == 'GeometryCollection':
geometries = geom['geometries']
for g in geometries:
c = g['coordinates']
utils.removeZ(c)
else:
coords = geom['coordinates']
utils.removeZ(coords)
return fromGeoJSON(data=geojsondict, crs=crs)
def toDict(collection, split_at=4000):
""" Get the FeatureCollection as a dict object """
size = collection.size()
condition = size.gte(4999)
def greater():
size = collection.size()
seq = tools.ee_list.sequence(0, size, split_at)
limits = ee.List.zip(seq.slice(1), seq)
def over_limits(n):
n = ee.List(n)
ini = ee.Number(n.get(0))
end = ee.Number(n.get(1))
return ee.FeatureCollection(collection.toList(ini, end))
return limits.map(over_limits)
collections = ee.List(
ee.Algorithms.If(condition,
greater(),
ee.List([collection])))
collections_size = collections.size().getInfo()
col = ee.FeatureCollection(collections.get(0))
content = col.getInfo()
feats = content['features']
for i in range(0, collections_size):
c = ee.FeatureCollection(collections.get(i))
content_c = c.getInfo()
feats_c = content_c['features']
feats = feats + feats_c
content['features'] = feats
return content
def toGeoJSON(collection, name, path=None, split_at=4000):
""" Export a FeatureCollection to a GeoJSON file
:param collection: The collection to export
:type collection: ee.FeatureCollection
:param name: name of the resulting file
:type name: str
:param path: The path where to save the file. If None, will be saved
in the current folder
:type path: str
:param split_at: limit to avoid an EE Exception
:type split_at: int
:return: A GeoJSON (.geojson) file.
:rtype: file
"""
import json
import os
if not path:
path = os.getcwd()
# name
    if not name.endswith('.geojson'):
        fname = name + '.geojson'
    else:
        fname = name
content = toDict(collection, split_at)
with open(os.path.join(path, fname), 'w') as thefile:
thefile.write(json.dumps(content))
return thefile
def toCSV(collection, filename, split_at=4000):
""" Alternative to download a FeatureCollection as a CSV """
d = toDict(collection, split_at)
fields = list(d['columns'].keys())
fields.append('geometry')
features = d['features']
ext = filename[-4:]
if ext != '.csv':
filename += '.csv'
with open(filename, 'w') as thecsv:
writer = csv.DictWriter(thecsv, fields)
writer.writeheader()
# write rows
for feature in features:
properties = feature['properties']
fid = feature['id']
geom = feature['geometry']['type']
# match fields
properties['system:index'] = fid
properties['geometry'] = geom
# write row
writer.writerow(properties)
return thecsv
def toLocal(collection, filename, filetype=None, selectors=None, path=None):
""" Download a FeatureCollection to a local file a CSV or geoJSON file.
This uses a different method than `toGeoJSON` and `toCSV`
:param filetype: The filetype of download, either CSV or JSON.
Defaults to CSV.
:param selectors: The selectors that should be used to determine which
attributes will be downloaded.
:param filename: The name of the file to be downloaded
"""
if not filetype:
filetype = 'CSV'
url = collection.getDownloadURL(filetype, selectors, filename)
thefile = utils.downloadFile(url, filename, filetype, path)
return thefile
def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs):
""" This function can create folders and ImageCollections on the fly.
The rest is the same to Export.image.toAsset. You can pass the same
params as the original function
:param table: the feature collection to upload
:type table: ee.FeatureCollection
:param assetPath: path to upload the image (only PATH, without
filename)
:type assetPath: str
:param name: filename for the image (AssetID will be assetPath + name)
:type name: str
:return: the tasks
:rtype: ee.batch.Task
"""
# Check if the user is specified in the asset path
is_user = (assetPath.split('/')[0] == 'users')
if not is_user:
user = ee.batch.data.getAssetRoots()[0]['id']
assetPath = "{}/{}".format(user, assetPath)
if create:
        # Recursively create the asset path
path2create = assetPath # '/'.join(assetPath.split('/')[:-1])
utils.createAssets([path2create], 'Folder', True)
# Asset ID (Path + name)
assetId = '/'.join([assetPath, name])
# Description
description = utils.matchDescription(name)
# Init task
task = ee.batch.Export.table.toAsset(table, assetId=assetId,
description=description, **kwargs)
task.start()
if verbose:
print('Exporting {} to {}'.format(name, assetPath))
return task | [((25, 12, 25, 38), 'ee.Projection', 'ee.Projection', ({(25, 26, 25, 37): '"""EPSG:4326"""'}, {}), "('EPSG:4326')", False, 'import ee\n'), ((27, 13, 27, 39), 'shapefile.Reader', 'shapefile.Reader', ({(27, 30, 27, 38): 'filename'}, {}), '(filename)', False, 'import shapefile\n'), ((75, 11, 75, 41), 'ee.FeatureCollection', 'ee.FeatureCollection', ({(75, 32, 75, 40): 'features'}, {}), '(features)', False, 'import ee\n'), ((332, 11, 333, 75), 'ee.batch.Export.table.toAsset', 'ee.batch.Export.table.toAsset', (), '', False, 'import ee\n'), ((72, 15, 72, 40), 'ee.Feature', 'ee.Feature', ({(72, 26, 72, 34): 'geometry', (72, 36, 72, 39): 'atr'}, {}), '(geometry, atr)', False, 'import ee\n'), ((131, 18, 131, 57), 'ee.feature.Feature', 'ee.feature.Feature', ({(131, 37, 131, 44): 'ee_geom', (131, 46, 131, 56): 'properties'}, {}), '(ee_geom, properties)', False, 'import ee\n'), ((235, 15, 235, 26), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((263, 17, 263, 47), 'csv.DictWriter', 'csv.DictWriter', ({(263, 32, 263, 38): 'thecsv', (263, 40, 263, 46): 'fields'}, {}), '(thecsv, fields)', False, 'import csv\n'), ((71, 23, 71, 40), 'ee.Geometry', 'ee.Geometry', ({(71, 35, 71, 39): 'geom'}, {}), '(geom)', False, 'import ee\n'), ((96, 22, 96, 41), 'json.loads', 'json.loads', ({(96, 33, 96, 40): 'content'}, {}), '(content)', False, 'import json\n'), ((187, 16, 187, 26), 'ee.List', 'ee.List', ({(187, 24, 187, 25): 'n'}, {}), '(n)', False, 'import ee\n'), ((197, 25, 197, 46), 'ee.List', 'ee.List', ({(197, 33, 197, 45): '[collection]'}, {}), '([collection])', False, 'import ee\n'), ((243, 14, 243, 39), 'os.path.join', 'os.path.join', ({(243, 27, 243, 31): 'path', (243, 33, 243, 38): 'fname'}, {}), '(path, fname)', False, 'import os\n'), ((244, 22, 244, 41), 'json.dumps', 'json.dumps', ({(244, 33, 244, 40): 'content'}, {}), '(content)', False, 'import json\n'), ((319, 15, 319, 44), 'ee.batch.data.getAssetRoots', 'ee.batch.data.getAssetRoots', ({}, {}), '()', False, 'import ee\n'), ((68, 23, 68, 52), 'ee.Geometry', 'ee.Geometry', ({(68, 35, 68, 39): 'geom', (68, 41, 68, 51): 'projection'}, {}), '(geom, projection)', False, 'import ee\n'), ((130, 64, 130, 82), 'ee.Projection', 'ee.Projection', ({(130, 78, 130, 81): 'crs'}, {}), '(crs)', False, 'import ee\n')] |
extwiii/Rock-paper-scissors-lizard-Spock | index.py | 7a8eda9f168636a9878c91620e625997ba0994a8 | # Rock-paper-scissors-lizard-Spock template
# The key idea of this program is to equate the strings
# "rock", "paper", "scissors", "lizard", "Spock" to numbers
# as follows:
#
# 0 - rock
# 1 - Spock
# 2 - paper
# 3 - lizard
# 4 - scissors
import random
def name_to_number(name):
if name == "rock":
return 0
elif name == 'Spock':
return 1
elif name == 'paper':
return 2
elif name == 'lizard':
return 3
elif name == 'scissors':
return 4
else :
return None
def number_to_name(number):
if number == 0:
return "rock"
elif number == 1:
return 'Spock'
elif number == 2:
return 'paper'
elif number == 3:
return 'lizard'
elif number == 4:
return 'scissors'
else :
return None
def rpsls(player_choice):
print ""
print "Player chooses",player_choice
player_number = name_to_number(player_choice)
comp_number = random.randrange(5)
comp_choice = number_to_name(comp_number)
print "Computer chooses",comp_choice
diff = (player_number - comp_number)%5
if (diff == 1) or (diff == 2):
print "Player wins!"
elif (diff == 3) or (diff == 4):
print "Computer wins!"
else :
print "Tie!"
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
| [] |
greenelab/phenoplier | libs/clustering/ensembles/utils.py | 95f04b17f0b5227560fcf32ac0a85b2c5aa9001f | """
Contains functions to generate and combine a clustering ensemble.
"""
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.metrics import adjusted_mutual_info_score as ami
from sklearn.metrics import normalized_mutual_info_score as nmi
from tqdm import tqdm
from clustering.utils import reset_estimator, compare_arrays
def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None):
"""
It generates an ensemble from the data given a set of clusterers (a
clusterer is an instance of a clustering algorithm with a fixed set of
parameters).
Args:
data:
A numpy array, pandas dataframe, or any other structure supported
by the clusterers as data input.
clusterers:
A dictionary with clusterers specified in this format: { 'k-means
#1': KMeans(n_clusters=2), ... }
attributes:
A list of attributes to save in the final dataframe; for example,
including "n_clusters" will extract this attribute from the
estimator and include it in the final dataframe returned.
affinity_matrix:
If the clustering algorithm is AgglomerativeClustering (from
sklearn) and the linkage method is different than ward (which only
support euclidean distance), the affinity_matrix is given as data
input to the estimator instead of data.
Returns:
A pandas DataFrame with all the partitions generated by the clusterers.
Columns include the clusterer name/id, the partition, the estimator
parameters (obtained with the get_params() method) and any other
attribute specified.
"""
ensemble = []
for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)):
# get partition
#
# for agglomerative clustering both data and affinity_matrix should be
# given; for ward linkage, data is used, and for the other linkage
# methods the affinity_matrix is used
if (type(clus_obj).__name__ == "AgglomerativeClustering") and (
clus_obj.linkage != "ward"
):
partition = clus_obj.fit_predict(affinity_matrix).astype(float)
else:
partition = clus_obj.fit_predict(data).astype(float)
# remove from partition noisy points (for example, if using DBSCAN)
partition[partition < 0] = np.nan
# get number of clusters
partition_no_nan = partition[~np.isnan(partition)]
n_clusters = np.unique(partition_no_nan).shape[0]
# stop if n_clusters <= 1
if n_clusters <= 1:
reset_estimator(clus_obj)
continue
res = pd.Series(
{
"clusterer_id": clus_name,
"clusterer_params": str(clus_obj.get_params()),
"partition": partition,
}
)
for attr in attributes:
if attr == "n_clusters" and not hasattr(clus_obj, attr):
res[attr] = n_clusters
else:
res[attr] = getattr(clus_obj, attr)
ensemble.append(res)
# for some estimators such as DBSCAN this is needed, because otherwise
# the estimator saves references of huge data structures not needed in
# this context
reset_estimator(clus_obj)
return pd.DataFrame(ensemble).set_index("clusterer_id")
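# Illustrative call (the clusterer names and parameters are made up; KMeans is
# scikit-learn's implementation):
#   from sklearn.cluster import KMeans
#   clusterers = {f"k-means #{k}": KMeans(n_clusters=k) for k in (2, 3, 4)}
#   ensemble = generate_ensemble(data, clusterers, attributes=["n_clusters"])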
def get_ensemble_distance_matrix(ensemble, n_jobs=1):
"""
Given an ensemble, it computes the coassociation matrix (a distance matrix
for all objects using the ensemble information). For each object pair, the
coassociation matrix contains the percentage of times the pair of objects
was clustered together in the ensemble.
Args:
ensemble:
A numpy array representing a set of clustering solutions on the same
data. Each row is a clustering solution (partition) and columns are
objects.
n_jobs:
The number of jobs used by the pairwise_distance matrix from
sklearn.
Returns:
A numpy array representing a square distance matrix for all objects
(coassociation matrix).
"""
def _compare(x, y):
xy = np.array([x, y]).T
xy = xy[~np.isnan(xy).any(axis=1)]
return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0]
return pairwise_distances(
ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite="allow-nan"
)
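# Worked toy example: for ensemble = np.array([[0, 0, 1], [0, 1, 1]]) the first
# two objects are clustered together in only one of the two partitions, so
# their entry in the returned matrix is a distance of 1/2 = 0.5.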
def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
"""
It combines a clustering ensemble using a set of methods that the user can
specify. Each of these methods combines the ensemble and returns a single
partition. This function returns the combined partition that maximizes the
selection criterion.
Args:
ensemble:
a clustering ensemble (rows are partitions, columns are objects).
k:
the final number of clusters for the combined partition.
methods:
a list of methods to apply on the ensemble; each returns a combined
partition.
selection_criterion:
a function that represents the selection criterion; this function
has to accept an ensemble as the first argument, and a partition as
the second one.
n_jobs:
number of jobs.
use_tqdm:
            enables/disables the use of tqdm to show a progress bar.
Returns:
Returns a tuple: (partition, best method name, best criterion value)
"""
from concurrent.futures import ProcessPoolExecutor, as_completed
methods_results = {}
with ProcessPoolExecutor(max_workers=n_jobs) as executor:
tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}
for future in tqdm(
as_completed(tasks),
total=len(tasks),
disable=(not use_tqdm),
ncols=100,
):
method_name = tasks[future]
part = future.result()
criterion_value = selection_criterion(ensemble, part)
methods_results[method_name] = {
"partition": part,
"criterion_value": criterion_value,
}
# select the best performing method according to the selection criterion
best_method = max(
methods_results, key=lambda x: methods_results[x]["criterion_value"]
)
best_method_results = methods_results[best_method]
return (
best_method_results["partition"],
best_method,
best_method_results["criterion_value"],
)
def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
"""
Runs a consensus clustering method on the ensemble data, obtains the
consolidated partition with the desired number of clusters, and computes
a series of performance measures.
Args:
method_func:
A consensus function (first argument is either the ensemble or
the coassociation matrix derived from the ensemble).
ensemble_data:
A numpy array with the ensemble data that will be given to the
specified method. For evidence accumulation methods, this is the
coassociation matrix (a square matrix with the distance between
object pairs derived from the ensemble).
ensemble:
A numpy array representing the ensemble (partitions in rows, objects
in columns).
k:
The number of clusters to obtain from the ensemble data using the
specified method.
kwargs:
Other parameters passed to `method_func`.
Returns:
It returns a tuple with the data partition derived from the ensemble
data using the specified method, and some performance measures of this
partition.
"""
part = method_func(ensemble_data, k, **kwargs)
nmi_values = np.array(
[
compare_arrays(ensemble_member, part, nmi, use_weighting=True)
for ensemble_member in ensemble
]
)
ami_values = np.array(
[
compare_arrays(ensemble_member, part, ami, use_weighting=True)
for ensemble_member in ensemble
]
)
ari_values = np.array(
[
compare_arrays(ensemble_member, part, ari, use_weighting=True)
for ensemble_member in ensemble
]
)
performance_values = {
"ari_mean": np.mean(ari_values),
"ari_median": np.median(ari_values),
"ari_std": np.std(ari_values),
"ami_mean": np.mean(ami_values),
"ami_median": np.median(ami_values),
"ami_std": np.std(ami_values),
"nmi_mean": np.mean(nmi_values),
"nmi_median": np.median(nmi_values),
"nmi_std": np.std(nmi_values),
}
return part, performance_values
| [((121, 11, 123, 5), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (), '', False, 'from sklearn.metrics import pairwise_distances\n'), ((90, 8, 90, 33), 'clustering.utils.reset_estimator', 'reset_estimator', ({(90, 24, 90, 32): 'clus_obj'}, {}), '(clus_obj)', False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((157, 9, 157, 48), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', (), '', False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((241, 20, 241, 39), 'numpy.mean', 'np.mean', ({(241, 28, 241, 38): 'ari_values'}, {}), '(ari_values)', True, 'import numpy as np\n'), ((242, 22, 242, 43), 'numpy.median', 'np.median', ({(242, 32, 242, 42): 'ari_values'}, {}), '(ari_values)', True, 'import numpy as np\n'), ((243, 19, 243, 37), 'numpy.std', 'np.std', ({(243, 26, 243, 36): 'ari_values'}, {}), '(ari_values)', True, 'import numpy as np\n'), ((244, 20, 244, 39), 'numpy.mean', 'np.mean', ({(244, 28, 244, 38): 'ami_values'}, {}), '(ami_values)', True, 'import numpy as np\n'), ((245, 22, 245, 43), 'numpy.median', 'np.median', ({(245, 32, 245, 42): 'ami_values'}, {}), '(ami_values)', True, 'import numpy as np\n'), ((246, 19, 246, 37), 'numpy.std', 'np.std', ({(246, 26, 246, 36): 'ami_values'}, {}), '(ami_values)', True, 'import numpy as np\n'), ((247, 20, 247, 39), 'numpy.mean', 'np.mean', ({(247, 28, 247, 38): 'nmi_values'}, {}), '(nmi_values)', True, 'import numpy as np\n'), ((248, 22, 248, 43), 'numpy.median', 'np.median', ({(248, 32, 248, 42): 'nmi_values'}, {}), '(nmi_values)', True, 'import numpy as np\n'), ((249, 19, 249, 37), 'numpy.std', 'np.std', ({(249, 26, 249, 36): 'nmi_values'}, {}), '(nmi_values)', True, 'import numpy as np\n'), ((68, 12, 68, 37), 'clustering.utils.reset_estimator', 'reset_estimator', ({(68, 28, 68, 36): 'clus_obj'}, {}), '(clus_obj)', False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((92, 11, 92, 33), 'pandas.DataFrame', 'pd.DataFrame', ({(92, 24, 92, 32): 'ensemble'}, {}), '(ensemble)', True, 'import pandas as pd\n'), ((117, 13, 117, 29), 'numpy.array', 'np.array', ({(117, 22, 117, 28): '[x, y]'}, {}), '([x, y])', True, 'import numpy as np\n'), ((161, 12, 161, 31), 'concurrent.futures.as_completed', 'as_completed', ({(161, 25, 161, 30): 'tasks'}, {}), '(tasks)', False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((221, 12, 221, 74), 'clustering.utils.compare_arrays', 'compare_arrays', (), '', False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((228, 12, 228, 74), 'clustering.utils.compare_arrays', 'compare_arrays', (), '', False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((235, 12, 235, 74), 'clustering.utils.compare_arrays', 'compare_arrays', (), '', False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((64, 21, 64, 48), 'numpy.unique', 'np.unique', ({(64, 31, 64, 47): 'partition_no_nan'}, {}), '(partition_no_nan)', True, 'import numpy as np\n'), ((63, 38, 63, 57), 'numpy.isnan', 'np.isnan', ({(63, 47, 63, 56): 'partition'}, {}), '(partition)', True, 'import numpy as np\n'), ((118, 17, 118, 29), 'numpy.isnan', 'np.isnan', ({(118, 26, 118, 28): 'xy'}, {}), '(xy)', True, 'import numpy as np\n')] |
toshi-click/chart_app | backend/chart/application/service/employees.py | 10577d7835554a93688ae0c58ecb25fbe2925bec | import logging
from django.db import transaction, connection
from django.utils import timezone
from django.utils.timezone import localtime
from chart.application.enums.department_type import DepartmentType
from chart.application.enums.gender_type import GenderType
from chart.application.service.app_logic_base import AppLogicBaseService
from chart.models import Employees, Departments
"""
A class that operates on the employees table.
"""
class EmployeesService(AppLogicBaseService):
def __init__(self):
super().__init__()
@staticmethod
@transaction.atomic()
def create_employees():
"""
        Create Employees records
"""
service = EmployeesService()
for emp_no in range(1, 11):
if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0:
if emp_no <= 5:
department_no = DepartmentType.SALES.value
else:
department_no = DepartmentType.MARKETING.value
select_model = Departments.objects.filter(department_no=department_no).values("id").first()
                # Register the record
service._regist_employees(select_model['id'], emp_no)
@staticmethod
@transaction.atomic()
def create_departments():
"""
        Create Departments records
"""
service = EmployeesService()
        # Delete all existing data
        # Because a ForeignKey is defined, run the delete() command
Departments.objects.all().delete()
for department_type in DepartmentType:
department_no = department_type.value
if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0:
                # Register the record
service._regist_departments(department_no, department_type.en_name)
@staticmethod
@transaction.atomic()
def update_employees():
"""
        Update Employees records
"""
service = EmployeesService()
        # Narrow down the queryset with filter()
        # gt: greater than (>), lt: less than (<)
for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0):
employees_id = employees_item.id
select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values(
"id").first()
department_id = select_model['id']
department_date_from = 20190903
            # Update the record
service._update_employees_department(employees_id, department_id, department_date_from)
        # Narrow down the queryset with filter()
        # gte: greater than or equal to (>=), lte: less than or equal to (<=)
for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0):
employees_id = employees_item.id
select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values("id").first()
department_id = select_model['id']
department_date_from = 20190905
            # Update the record
service._update_employees_department(employees_id, department_id, department_date_from)
@staticmethod
def select_employees():
"""
        Search Employees records
"""
        # Specifying table_name__field_name produces an INNER JOIN
        # A query is issued every time the related table is accessed
for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value,
delete_flag=0):
logging.debug("reference:emp_no={}".format(employees_item.emp_no))
logging.debug("reference:department_no={}".format(employees_item.department.department_no))
logging.debug("reference:department_name={}".format(employees_item.department.department_name))
logging.debug("reference:first_name={}".format(employees_item.first_name))
logging.debug("reference:last_name={}".format(employees_item.last_name))
        # select_related fetches the related objects and caches them
        # Only a single query is issued
for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related("department"):
logging.debug("select_related:emp_no={}".format(employees_item.emp_no))
logging.debug("select_related:first_name={}".format(employees_item.first_name))
logging.debug("select_related:last_name={}".format(employees_item.last_name))
logging.debug("select_related:department_no={}".format(employees_item.department.department_no))
logging.debug("select_related:department_name={}".format(employees_item.department.department_name))
        # prefetch_related fetches the related objects and caches them
        # Two queries are issued and the results are joined on the ForeignKey
for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related(
"department__employees_set"):
logging.debug("prefetch_related:emp_no={}".format(employees_item.emp_no))
logging.debug("prefetch_related:first_name={}".format(employees_item.first_name))
logging.debug("prefetch_related:last_name={}".format(employees_item.last_name))
logging.debug("prefetch_related:department_no={}".format(employees_item.department.department_no))
logging.debug("prefetch_related:department_name={}".format(employees_item.department.department_name))
@staticmethod
@transaction.atomic()
def truncate_employees():
"""
        Truncate the employees table
"""
cursor = connection.cursor()
cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table))
def _regist_employees(self, department_id, emp_no):
"""
        Register an employees record
"""
self.regist_model = Employees()
self.regist_model.emp_no = emp_no
self.regist_model.department_id = department_id
self.regist_model.first_name = "first_name_" + str(emp_no).zfill(3)
self.regist_model.last_name = "last_name_" + str(emp_no).zfill(3)
self.regist_model.gender = GenderType.MAN.value
self.regist_model.department_date_from = "20190902"
self.regist_model.delete_flag = 0
self.regist_model.regist_dt = localtime(timezone.now())
self.regist_model.update_dt = localtime(timezone.now())
self.regist_model.save()
return self.regist_model.id
def _regist_departments(self, department_no, department_name):
"""
        Register a departments record
"""
self.regist_model = Departments()
self.regist_model.department_no = department_no
self.regist_model.department_name = department_name
self.regist_model.delete_flag = 0
self.regist_model.regist_dt = localtime(timezone.now())
self.regist_model.update_dt = localtime(timezone.now())
self.regist_model.save()
def _update_employees_department(self, employees_id, department_id, department_date_from):
"""
        Update the department assignment information
"""
self.update_model = Employees()
self.update_model.pk = employees_id
self.update_model.department_id = department_id
self.update_model.department_date_from = department_date_from
self.update_model.update_dt = localtime(timezone.now())
self.update_model.save(update_fields=['department_id', 'department_date_from', 'update_dt'])
| [((20, 5, 20, 25), 'django.db.transaction.atomic', 'transaction.atomic', ({}, {}), '()', False, 'from django.db import transaction, connection\n'), ((38, 5, 38, 25), 'django.db.transaction.atomic', 'transaction.atomic', ({}, {}), '()', False, 'from django.db import transaction, connection\n'), ((56, 5, 56, 25), 'django.db.transaction.atomic', 'transaction.atomic', ({}, {}), '()', False, 'from django.db import transaction, connection\n'), ((119, 5, 119, 25), 'django.db.transaction.atomic', 'transaction.atomic', ({}, {}), '()', False, 'from django.db import transaction, connection\n'), ((65, 30, 65, 97), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((76, 30, 76, 99), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((91, 30, 92, 69), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((124, 17, 124, 36), 'django.db.connection.cursor', 'connection.cursor', ({}, {}), '()', False, 'from django.db import transaction, connection\n'), ((131, 28, 131, 39), 'chart.models.Employees', 'Employees', ({}, {}), '()', False, 'from chart.models import Employees, Departments\n'), ((148, 28, 148, 41), 'chart.models.Departments', 'Departments', ({}, {}), '()', False, 'from chart.models import Employees, Departments\n'), ((160, 28, 160, 39), 'chart.models.Employees', 'Employees', ({}, {}), '()', False, 'from chart.models import Employees, Departments\n'), ((139, 48, 139, 62), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((140, 48, 140, 62), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((152, 48, 152, 62), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((153, 48, 153, 62), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((164, 48, 164, 62), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((47, 8, 47, 33), 'chart.models.Departments.objects.all', 'Departments.objects.all', ({}, {}), '()', False, 'from chart.models import Employees, Departments\n'), ((101, 30, 101, 84), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((110, 30, 110, 84), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((28, 15, 28, 69), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((51, 15, 51, 85), 'chart.models.Departments.objects.filter', 'Departments.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((67, 27, 67, 100), 'chart.models.Departments.objects.filter', 'Departments.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((78, 27, 78, 95), 'chart.models.Departments.objects.filter', 'Departments.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n'), ((33, 31, 33, 86), 'chart.models.Departments.objects.filter', 'Departments.objects.filter', (), '', False, 'from chart.models import Employees, Departments\n')] |
pwyf/data-quality-tester | DataQualityTester/views/pages.py | d7674849c64d4d41ff4e4b6b12631994c7ce0a92 | from flask import render_template
def home():
return render_template('upload.html')
def about():
return render_template('about.html')
| [((5, 11, 5, 41), 'flask.render_template', 'render_template', ({(5, 27, 5, 40): '"""upload.html"""'}, {}), "('upload.html')", False, 'from flask import render_template\n'), ((9, 11, 9, 40), 'flask.render_template', 'render_template', ({(9, 27, 9, 39): '"""about.html"""'}, {}), "('about.html')", False, 'from flask import render_template\n')] |
roselight/Image-Recognition-with-OpenCv | hastakayit_gui.py | 4d0607f37bc80ee0b00790cdcbb0a22c76852ac4 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\hastakayit_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector
from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow
from PyQt5.QtCore import Qt, QDate, QDateTime
# Set up the SQL connection to the database.
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="12345",
database="cilth_vt"
)
cursor = db.cursor()
class Ui_MainWindow2(QMainWindow):
def setupUi2(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(600, 205)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
MainWindow.setWindowIcon(icon)
MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.btn_kayit = QtWidgets.QPushButton(self.centralwidget)
self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("../avatar.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.btn_kayit.setIcon(icon1)
self.btn_kayit.setObjectName("btn_kayit")
self.btn_kayit.clicked.connect(self.kayitekle)
self.btn_cikis = QtWidgets.QPushButton(self.centralwidget)
self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31))
self.btn_cikis.setObjectName("btn_cikis")
self.btn_cikis.clicked.connect(self.close)
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_htc.setObjectName("lbl_htc")
self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1)
self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hadsoyad.setObjectName("lbl_hadsoyad")
self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1)
self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hcinsiyet.setObjectName("lbl_hcinsiyet")
self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1)
self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1)
self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit_3.setObjectName("lineEdit_3")
self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1)
self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hdt.setObjectName("lbl_hdt")
self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1)
self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2)
self.dt_hdt.setObjectName("dt_hdt")
self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def kayitekle(self):
# k_ad/k_sfire lineedit'ten alınan verileri sorguya gönderir.
h_tc=self.lineEdit.text()
h_ads=self.lineEdit_2.text()
h_csyt=self.lineEdit_3.text()
h_dt=self.dt_hdt.text()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
QMessageBox.setWindowIcon(self, icon)
try:
hasta_ekle = ("INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)")
cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt))
db.commit()
veri = cursor.rowcount
except:
veri=2
if (veri == 1):
QMessageBox.information(self, 'BİLGİLENDİRME', "İşlem Başarılı.")
else:
QMessageBox.information(self, 'BİLGİLENDİRME', "İşlem Başarısız")
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı"))
self.btn_kayit.setText(_translate("MainWindow", "ONAYLA"))
self.btn_cikis.setText(_translate("MainWindow", "İPTAL"))
self.lbl_htc.setText(_translate("MainWindow", "TC Kimlik No:"))
self.lbl_hadsoyad.setText(_translate("MainWindow", "Hasta Adı Soyadı:"))
self.lbl_hcinsiyet.setText(_translate("MainWindow", "Cinsiyet: "))
self.lbl_hdt.setText(_translate("MainWindow", "Doğum Tarihi:"))
self.dt_hdt.setDisplayFormat(_translate("MainWindow", "yyyy.MM.dd"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow2()
ui.setupUi2(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [((122, 10, 122, 42), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', ({(122, 33, 122, 41): 'sys.argv'}, {}), '(sys.argv)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((123, 17, 123, 40), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ({}, {}), '()', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((28, 15, 28, 28), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ({}, {}), '()', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((32, 29, 32, 58), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ({(32, 47, 32, 57): 'MainWindow'}, {}), '(MainWindow)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((34, 25, 34, 66), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ({(34, 47, 34, 65): 'self.centralwidget'}, {}), '(self.centralwidget)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((36, 16, 36, 29), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ({}, {}), '()', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((41, 25, 41, 66), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ({(41, 47, 41, 65): 'self.centralwidget'}, {}), '(self.centralwidget)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((45, 34, 45, 71), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ({(45, 52, 45, 70): 'self.centralwidget'}, {}), '(self.centralwidget)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((48, 28, 48, 74), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ({(48, 50, 48, 73): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((51, 23, 51, 64), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ({(51, 40, 51, 63): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((54, 28, 54, 69), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ({(54, 45, 54, 68): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((57, 29, 57, 70), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ({(57, 46, 57, 69): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((60, 26, 60, 70), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ({(60, 46, 60, 69): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((63, 26, 63, 70), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ({(63, 46, 63, 69): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((66, 24, 66, 68), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ({(66, 44, 66, 67): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((69, 23, 69, 64), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ({(69, 40, 69, 63): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((72, 22, 72, 66), 'PyQt5.QtWidgets.QDateEdit', 'QtWidgets.QDateEdit', ({(72, 42, 72, 65): 'self.gridLayoutWidget_2'}, {}), '(self.gridLayoutWidget_2)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((77, 25, 77, 57), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', ({(77, 46, 77, 56): 'MainWindow'}, {}), '(MainWindow)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((82, 8, 82, 57), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', ({(82, 46, 82, 56): 
'MainWindow'}, {}), '(MainWindow)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((91, 13, 91, 26), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ({}, {}), '()', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((93, 6, 93, 43), 'PyQt5.QtWidgets.QMessageBox.setWindowIcon', 'QMessageBox.setWindowIcon', ({(93, 32, 93, 36): 'self', (93, 38, 93, 42): 'icon'}, {}), '(self, icon)', False, 'from PyQt5.QtWidgets import QMessageBox, QWidget, QMainWindow\n'), ((29, 23, 29, 56), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', ({(29, 37, 29, 55): '"""../heartbeat.png"""'}, {}), "('../heartbeat.png')", False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((35, 35, 35, 66), 'PyQt5.QtCore.QRect', 'QtCore.QRect', ({(35, 48, 35, 51): '(180)', (35, 53, 35, 56): '(150)', (35, 58, 35, 61): '(121)', (35, 63, 35, 65): '(31)'}, {}), '(180, 150, 121, 31)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((37, 24, 37, 54), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', ({(37, 38, 37, 53): '"""../avatar.png"""'}, {}), "('../avatar.png')", False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((42, 35, 42, 66), 'PyQt5.QtCore.QRect', 'QtCore.QRect', ({(42, 48, 42, 51): '(310)', (42, 53, 42, 56): '(150)', (42, 58, 42, 61): '(121)', (42, 63, 42, 65): '(31)'}, {}), '(310, 150, 121, 31)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((46, 44, 46, 74), 'PyQt5.QtCore.QRect', 'QtCore.QRect', ({(46, 57, 46, 59): '(10)', (46, 61, 46, 63): '(10)', (46, 65, 46, 68): '(571)', (46, 70, 46, 73): '(128)'}, {}), '(10, 10, 571, 128)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((92, 21, 92, 51), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', ({(92, 35, 92, 50): '"""heartbeat.png"""'}, {}), "('heartbeat.png')", False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((106, 9, 106, 82), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', ({(106, 33, 106, 37): 'self', (106, 39, 106, 57): '"""BİLGİLENDİRME"""', (106, 59, 106, 81): '"""İşlem Başarılı."""'}, {}), "(self, 'BİLGİLENDİRME', 'İşlem Başarılı.')", False, 'from PyQt5.QtWidgets import QMessageBox, QWidget, QMainWindow\n'), ((108, 10, 108, 83), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', ({(108, 34, 108, 38): 'self', (108, 40, 108, 58): '"""BİLGİLENDİRME"""', (108, 60, 108, 82): '"""İşlem Başarısız"""'}, {}), "(self, 'BİLGİLENDİRME', 'İşlem Başarısız')", False, 'from PyQt5.QtWidgets import QMessageBox, QWidget, QMainWindow\n'), ((74, 49, 74, 73), 'PyQt5.QtCore.QDate', 'QtCore.QDate', ({(74, 62, 74, 66): '(2019)', (74, 68, 74, 69): '(1)', (74, 71, 74, 72): '(1)'}, {}), '(2019, 1, 1)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((74, 75, 74, 96), 'PyQt5.QtCore.QTime', 'QtCore.QTime', ({(74, 88, 74, 89): '(0)', (74, 91, 74, 92): '(0)', (74, 94, 74, 95): '(0)'}, {}), '(0, 0, 0)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
loonwerks/AGREE | .travis/manage_daily_builds.py | 58640ab89aaa3c72ccca0b8c80cf96d1815981da | #!/usr/bin/env python3
'''
Copyright (c) 2021, Collins Aerospace.
Developed with the sponsorship of Defense Advanced Research Projects Agency (DARPA).
Permission is hereby granted, free of charge, to any person obtaining a copy of this data,
including any software or models in source or binary form, as well as any drawings, specifications,
and documentation (collectively "the Data"), to deal in the Data without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Data, and to permit persons to whom the Data is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Data.
THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
'''
import os
import re
import sys
from github3 import GitHub
from pprint import pformat
GITHUB_API = 'https://api.github.com/repos'
GITHUB_RELEASES = 'releases'
AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None
REPOSITORY_OWNER = 'loonwerks'
REPOSITORY_REPO = 'AGREE'
PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\d+\.\d+\.\d+(-(\d{12}))?-.*')
def manage_daily_builds(sname):
print('Managing builds matching %s' % (sname))
# obtain git handle
gh = GitHub(GITHUB_API, token=AUTH_TOKEN)
repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO)
# get list of releases
releases = repository.releases()
# extract keys and sort by build date
release_keys = {x.id : x.created_at for x in releases if sname in x.name}
sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1])
print('%s' % (pformat(sorted_keys)))
# filter to obtain the keys to delete
delete_keys = [v[0] for v in sorted_keys[2:]]
print('Deleting releases: %s' % (pformat(delete_keys)))
# iterate, deleting the releases and corresponding tags
for rel in releases:
print('examining rel %d from %s...' % (rel.id, str(rel.created_at)))
if rel.id in delete_keys and rel.tag_name is not None:
print(' deleting release id %d and tag %s.' % (rel.id, rel.tag_name))
rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name))
rel.delete()
if rel_tag_ref is not None:
print(' deleting tag %s' % (rel_tag_ref.ref))
rel_tag_ref.delete()
else:
# Look for stale files in the release
assets = rel.assets()
print('In release %s found assets:' % (rel.name))
for asset in assets:
match = PRODUCT_ASSET_PATTERN.search(asset.name)
print(' asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)])
latest_build_time = build_times[-1] if build_times else None
print('Lastest build time is %s' % (latest_build_time))
for asset in assets:
match = PRODUCT_ASSET_PATTERN.search(asset.name)
# print(' asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
if match is not None:
asset_build_time = match.group(1)
if asset_build_time != latest_build_time:
print('deleting stale asset %s' % (asset.name))
asset.delete()
if __name__ == '__main__':
manage_daily_builds(sys.argv[1])
| [((38, 24, 38, 108), 're.compile', 're.compile', ({(38, 35, 38, 107): '"""com.rockwellcollins.atc.agree.repository-\\\\d+\\\\.\\\\d+\\\\.\\\\d+(-(\\\\d{12}))?-.*"""'}, {}), "(\n 'com.rockwellcollins.atc.agree.repository-\\\\d+\\\\.\\\\d+\\\\.\\\\d+(-(\\\\d{12}))?-.*'\n )", False, 'import re\n'), ((43, 9, 43, 45), 'github3.GitHub', 'GitHub', (), '', False, 'from github3 import GitHub\n'), ((33, 53, 33, 70), 'os.environ.keys', 'os.environ.keys', ({}, {}), '()', False, 'import os\n'), ((50, 18, 50, 38), 'pprint.pformat', 'pformat', ({(50, 26, 50, 37): 'sorted_keys'}, {}), '(sorted_keys)', False, 'from pprint import pformat\n'), ((53, 37, 53, 57), 'pprint.pformat', 'pformat', ({(53, 45, 53, 56): 'delete_keys'}, {}), '(delete_keys)', False, 'from pprint import pformat\n')] |
KaihuiLiang/ParlAI | tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py | fb5c92741243756516fa50073d34e94ba0b6981e | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test components of specific crowdsourcing tasks.
"""
import json
import os
import unittest
import pandas as pd
import parlai.utils.testing as testing_utils
try:
from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import (
TurnAnnotationsStaticResultsCompiler,
)
from parlai.crowdsourcing.utils.tests import check_stdout
class TestAnalysis(unittest.TestCase):
"""
Test the analysis code for the static turn annotations task.
"""
def test_compile_results(self):
"""
Test compiling results on a dummy set of data.
"""
with testing_utils.tempdir() as tmpdir:
# Define expected stdout
# Paths
analysis_samples_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'analysis_samples'
)
analysis_outputs_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'test_turn_annotations_static_analysis',
)
expected_stdout_path = os.path.join(
analysis_outputs_folder, 'test_stdout.txt'
)
temp_gold_annotations_path = os.path.join(
tmpdir, 'gold_annotations.json'
)
# Save a file of gold annotations
gold_annotations = {
"1_0_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": False,
"none_all_good": True,
},
"1_1_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
"2_0_5": {
"bucket_0": False,
"bucket_1": True,
"bucket_2": False,
"bucket_3": False,
"bucket_4": False,
"none_all_good": False,
},
"2_1_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
}
with open(temp_gold_annotations_path, 'w') as f:
json.dump(gold_annotations, f)
# Run compilation of results
parser = TurnAnnotationsStaticResultsCompiler.setup_args()
parser.set_defaults(
**{
'results_folders': analysis_samples_folder,
'output_folder': tmpdir,
'onboarding_in_flight_data_file': os.path.join(
analysis_samples_folder, 'onboarding_in_flight.jsonl'
),
'gold_annotations_file': temp_gold_annotations_path,
}
)
args = parser.parse_args([])
with testing_utils.capture_output() as output:
compiler = TurnAnnotationsStaticResultsCompiler(vars(args))
compiler.NUM_SUBTASKS = 3
compiler.NUM_ANNOTATIONS = 3
compiler.compile_results()
actual_stdout = output.getvalue()
# Check the output against what it should be
check_stdout(
actual_stdout=actual_stdout,
expected_stdout_path=expected_stdout_path,
)
# Check that the saved results file is what it should be
sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx']
expected_results_path = os.path.join(
analysis_outputs_folder, 'expected_results.csv'
)
expected_results = (
pd.read_csv(expected_results_path)
.drop('folder', axis=1)
.sort_values(sort_columns)
.reset_index(drop=True)
)
# Drop the 'folder' column, which contains a system-dependent path string
actual_results_rel_path = [
obj for obj in os.listdir(tmpdir) if obj.startswith('results')
][0]
actual_results_path = os.path.join(tmpdir, actual_results_rel_path)
actual_results = (
pd.read_csv(actual_results_path)
.drop('folder', axis=1)
.sort_values(sort_columns)
.reset_index(drop=True)
)
if not actual_results.equals(expected_results):
raise ValueError(
f'\n\n\tExpected results:\n{expected_results.to_csv()}'
f'\n\n\tActual results:\n{actual_results.to_csv()}'
)
except ImportError:
pass
if __name__ == "__main__":
unittest.main()
| [((153, 4, 153, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((36, 17, 36, 40), 'parlai.utils.testing.tempdir', 'testing_utils.tempdir', ({}, {}), '()', True, 'import parlai.utils.testing as testing_utils\n'), ((48, 39, 50, 17), 'os.path.join', 'os.path.join', ({(49, 20, 49, 43): 'analysis_outputs_folder', (49, 45, 49, 62): '"""test_stdout.txt"""'}, {}), "(analysis_outputs_folder, 'test_stdout.txt')", False, 'import os\n'), ((51, 45, 53, 17), 'os.path.join', 'os.path.join', ({(52, 20, 52, 26): 'tmpdir', (52, 28, 52, 51): '"""gold_annotations.json"""'}, {}), "(tmpdir, 'gold_annotations.json')", False, 'import os\n'), ((94, 25, 94, 74), 'parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results.TurnAnnotationsStaticResultsCompiler.setup_args', 'TurnAnnotationsStaticResultsCompiler.setup_args', ({}, {}), '()', False, 'from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import TurnAnnotationsStaticResultsCompiler\n'), ((114, 16, 117, 17), 'parlai.crowdsourcing.utils.tests.check_stdout', 'check_stdout', (), '', False, 'from parlai.crowdsourcing.utils.tests import check_stdout\n'), ((121, 40, 123, 17), 'os.path.join', 'os.path.join', ({(122, 20, 122, 43): 'analysis_outputs_folder', (122, 45, 122, 67): '"""expected_results.csv"""'}, {}), "(analysis_outputs_folder, 'expected_results.csv')", False, 'import os\n'), ((134, 38, 134, 83), 'os.path.join', 'os.path.join', ({(134, 51, 134, 57): 'tmpdir', (134, 59, 134, 82): 'actual_results_rel_path'}, {}), '(tmpdir, actual_results_rel_path)', False, 'import os\n'), ((91, 20, 91, 50), 'json.dump', 'json.dump', ({(91, 30, 91, 46): 'gold_annotations', (91, 48, 91, 49): 'f'}, {}), '(gold_annotations, f)', False, 'import json\n'), ((106, 21, 106, 51), 'parlai.utils.testing.capture_output', 'testing_utils.capture_output', ({}, {}), '()', True, 'import parlai.utils.testing as testing_utils\n'), ((42, 36, 42, 61), 'os.path.abspath', 'os.path.abspath', ({(42, 52, 42, 60): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((45, 36, 45, 61), 'os.path.abspath', 'os.path.abspath', ({(45, 52, 45, 60): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((99, 58, 101, 25), 'os.path.join', 'os.path.join', ({(100, 28, 100, 51): 'analysis_samples_folder', (100, 53, 100, 81): '"""onboarding_in_flight.jsonl"""'}, {}), "(analysis_samples_folder, 'onboarding_in_flight.jsonl')", False, 'import os\n'), ((132, 35, 132, 53), 'os.listdir', 'os.listdir', ({(132, 46, 132, 52): 'tmpdir'}, {}), '(tmpdir)', False, 'import os\n'), ((125, 20, 125, 54), 'pandas.read_csv', 'pd.read_csv', ({(125, 32, 125, 53): 'expected_results_path'}, {}), '(expected_results_path)', True, 'import pandas as pd\n'), ((136, 20, 136, 52), 'pandas.read_csv', 'pd.read_csv', ({(136, 32, 136, 51): 'actual_results_path'}, {}), '(actual_results_path)', True, 'import pandas as pd\n')] |
bartongroup/slivka-bio | scripts/selectors.py | 049aee943503963ce5c9b14267fe001edd8e0125 | def example_selector(*args, **kwargs): return "default"
| [] |
mith1979/ansible_automation | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py | 013dfa67c6d91720b787fadb21de574b6e023a26 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rabbitmq_plugin
short_description: Adds or removes plugins to RabbitMQ
description:
- Enables or disables RabbitMQ plugins
version_added: "1.1"
author: Chris Hoffman
options:
names:
description:
- Comma-separated list of plugin names
required: true
default: null
aliases: [name]
new_only:
description:
- Only enable missing plugins
- Does not disable plugins that are not in the names list
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if plugins are to be enabled or disabled
required: false
default: enabled
choices: [enabled, disabled]
prefix:
description:
- Specify a custom install prefix to a Rabbit
required: false
version_added: "1.3"
default: null
'''
EXAMPLES = '''
# Enables the rabbitmq_management plugin
- rabbitmq_plugin: names=rabbitmq_management state=enabled
'''
class RabbitMqPlugins(object):
def __init__(self, module):
self.module = module
if module.params['prefix']:
self._rabbitmq_plugins = module.params['prefix'] + "/sbin/rabbitmq-plugins"
else:
self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = [self._rabbitmq_plugins]
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def get_all(self):
return self._exec(['list', '-E', '-m'], True)
def enable(self, name):
self._exec(['enable', name])
def disable(self, name):
self._exec(['disable', name])
def main():
arg_spec = dict(
names=dict(required=True, aliases=['name']),
new_only=dict(default='no', type='bool'),
state=dict(default='enabled', choices=['enabled', 'disabled']),
prefix=dict(required=False, default=None)
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
names = module.params['names'].split(',')
new_only = module.params['new_only']
state = module.params['state']
rabbitmq_plugins = RabbitMqPlugins(module)
enabled_plugins = rabbitmq_plugins.get_all()
enabled = []
disabled = []
if state == 'enabled':
if not new_only:
for plugin in enabled_plugins:
if plugin not in names:
rabbitmq_plugins.disable(plugin)
disabled.append(plugin)
for name in names:
if name not in enabled_plugins:
rabbitmq_plugins.enable(name)
enabled.append(name)
else:
for plugin in enabled_plugins:
if plugin in names:
rabbitmq_plugins.disable(plugin)
disabled.append(plugin)
changed = len(enabled) > 0 or len(disabled) > 0
module.exit_json(changed=changed, enabled=enabled, disabled=disabled)
# import module snippets
from ansible.module_utils.basic import *
main()
| [] |
HoonMinJeongUm/Hunmin-vitrage | vitrage/datasources/static/driver.py | 37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6 | # Copyright 2016 - Nokia, ZTE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from itertools import chain
from six.moves import reduce
from oslo_log import log
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import GraphAction
from vitrage.datasources.driver_base import DriverBase
from vitrage.datasources.static import STATIC_DATASOURCE
from vitrage.datasources.static import StaticFields
from vitrage.utils import file as file_utils
LOG = log.getLogger(__name__)
class StaticDriver(DriverBase):
# base fields are required for all entities, others are treated as metadata
BASE_FIELDS = {StaticFields.STATIC_ID,
StaticFields.TYPE,
StaticFields.ID}
def __init__(self, conf):
super(StaticDriver, self).__init__()
self.cfg = conf
self.entities_cache = []
@staticmethod
def _is_valid_config(config):
"""check for validity of configuration"""
# TODO(yujunz) check with yaml schema or reuse template validation
return StaticFields.DEFINITIONS in config
@staticmethod
def get_event_types():
return []
def enrich_event(self, event, event_type):
pass
def get_all(self, datasource_action):
return self.make_pickleable(self._get_and_cache_all_entities(),
STATIC_DATASOURCE,
datasource_action)
def get_changes(self, datasource_action):
return self.make_pickleable(self._get_and_cache_changed_entities(),
STATIC_DATASOURCE,
datasource_action)
def _get_and_cache_all_entities(self):
self.entities_cache = self._get_all_entities()
return self.entities_cache
def _get_all_entities(self):
files = file_utils.list_files(self.cfg.static.directory, '.yaml', True)
return list(reduce(chain, [self._get_entities_from_file(path)
for path in files], []))
def _get_and_cache_changed_entities(self):
changed_entities = []
new_entities = self._get_all_entities()
for new_entity in new_entities:
old_entity = self._find_entity(new_entity, self.entities_cache)
if old_entity:
# Add modified entities
if not self._equal_entities(old_entity, new_entity):
changed_entities.append(new_entity.copy())
else:
# Add new entities
changed_entities.append(new_entity.copy())
# Add deleted entities
for old_entity in self.entities_cache:
if not self._find_entity(old_entity, new_entities):
old_entity_copy = old_entity.copy()
old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY
changed_entities.append(old_entity_copy)
self.entities_cache = new_entities
return changed_entities
@classmethod
def _get_entities_from_file(cls, path):
config = file_utils.load_yaml_file(path)
if not cls._is_valid_config(config):
LOG.warning("Skipped invalid config (possible obsoleted): {}"
.format(path))
return []
definitions = config[StaticFields.DEFINITIONS]
entities = definitions[StaticFields.ENTITIES]
relationships = definitions[StaticFields.RELATIONSHIPS]
return cls._pack(entities, relationships)
@classmethod
def _pack(cls, entities, relationships):
entities_dict = {}
for entity in entities:
cls._pack_entity(entities_dict, entity)
for rel in relationships:
cls._pack_rel(entities_dict, rel)
return entities_dict.values()
@classmethod
def _pack_entity(cls, entities_dict, entity):
static_id = entity[StaticFields.STATIC_ID]
if static_id not in entities_dict:
metadata = {key: value for key, value in entity.items()
if key not in cls.BASE_FIELDS}
entities_dict[static_id] = entity
entity[StaticFields.RELATIONSHIPS] = []
entity[StaticFields.METADATA] = metadata
else:
LOG.warning("Skipped duplicated entity: {}".format(entity))
@classmethod
def _pack_rel(cls, entities_dict, rel):
source_id = rel[StaticFields.SOURCE]
target_id = rel[StaticFields.TARGET]
if source_id == target_id:
# self pointing relationship
entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel)
else:
source, target = entities_dict[source_id], entities_dict[target_id]
source[StaticFields.RELATIONSHIPS].append(
cls._expand_neighbor(rel, target))
@staticmethod
def _expand_neighbor(rel, neighbor):
"""Expand config id to neighbor entity
rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'}
neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}
result={'relationship_type': 'attached', 'source': 's1',
'target': {'static_id': 'h1',
'vitrage_type': 'host.nova',
'id': 1}}
"""
rel = rel.copy()
if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]:
rel[StaticFields.SOURCE] = neighbor
elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]:
rel[StaticFields.TARGET] = neighbor
else:
# TODO(yujunz) raise exception and ignore invalid relationship
LOG.error("Invalid neighbor {} for relationship {}"
.format(neighbor, rel))
return None
return rel
@staticmethod
def _find_entity(search_entity, entities):
# naive implementation since we don't expect many static entities
for entity in entities:
if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \
and entity[StaticFields.ID] == \
search_entity[StaticFields.ID]:
return entity
@staticmethod
def _equal_entities(old_entity, new_entity):
# TODO(iafek): compare also the relationships
return old_entity.get(StaticFields.TYPE) == \
new_entity.get(StaticFields.TYPE) and \
old_entity.get(StaticFields.ID) == \
new_entity.get(StaticFields.ID) and \
old_entity.get(StaticFields.NAME) == \
new_entity.get(StaticFields.NAME) and \
old_entity.get(StaticFields.STATE) == \
new_entity.get(StaticFields.STATE)
| [((27, 6, 27, 29), 'oslo_log.log.getLogger', 'log.getLogger', ({(27, 20, 27, 28): '__name__'}, {}), '(__name__)', False, 'from oslo_log import log\n'), ((69, 16, 69, 79), 'vitrage.utils.file.list_files', 'file_utils.list_files', ({(69, 38, 69, 63): 'self.cfg.static.directory', (69, 65, 69, 72): '""".yaml"""', (69, 74, 69, 78): 'True'}, {}), "(self.cfg.static.directory, '.yaml', True)", True, 'from vitrage.utils import file as file_utils\n'), ((100, 17, 100, 48), 'vitrage.utils.file.load_yaml_file', 'file_utils.load_yaml_file', ({(100, 43, 100, 47): 'path'}, {}), '(path)', True, 'from vitrage.utils import file as file_utils\n')] |
marshuang80/napari | napari/layers/shapes/mesh.py | 10f1d0f39fe9ccd42456c95458e2f23b59450f02 | import numpy as np
class Mesh:
"""Contains meshses of shapes that will ultimately get rendered.
Attributes
----------
vertices : np.ndarray
Qx2 array of vertices of all triangles for shapes including edges and
faces
vertices_centers : np.ndarray
Qx2 array of centers of vertices of triangles for shapes. For vertices
corresponding to faces these are the same as the actual vertices. For
vertices corresponding to edges these values should be added to a
scaled `vertices_offsets` to get the actual vertex positions.
The scaling corresponds to the width of the edge
vertices_offsets : np.ndarray
Qx2 array of offsets of vertices of triangles for shapes. For vertices
corresponding to faces these are 0. For vertices corresponding to
edges these values should be scaled and added to the
`vertices_centers` to get the actual vertex positions.
The scaling corresponds to the width of the edge
vertices_index : np.ndarray
Qx2 array of the index (0, ..., N-1) of each shape that each vertex
corresponds and the mesh type (0, 1) for face or edge.
triangles : np.ndarray
Px3 array of vertex indices that form the mesh triangles
triangles_index : np.ndarray
Px2 array of the index (0, ..., N-1) of each shape that each triangle
corresponds and the mesh type (0, 1) for face or edge.
triangles_colors : np.ndarray
Px4 array of the rgba color of each triangle
triangles_z_order : np.ndarray
Length P array of the z order of each triangle. Must be a permutation
of (0, ..., P-1)
Extended Summary
----------
_types : list
Length two list of the different mesh types corresponding to faces and
edges
"""
_types = ['face', 'edge']
def __init__(self):
self.clear()
def clear(self):
"""Resets mesh data
"""
self.vertices = np.empty((0, 2))
self.vertices_centers = np.empty((0, 2))
self.vertices_offsets = np.empty((0, 2))
self.vertices_index = np.empty((0, 2), dtype=int)
self.triangles = np.empty((0, 3), dtype=np.uint32)
self.triangles_index = np.empty((0, 2), dtype=int)
self.triangles_colors = np.empty((0, 4))
self.triangles_z_order = np.empty((0), dtype=int)
| [((54, 24, 54, 40), 'numpy.empty', 'np.empty', ({(54, 33, 54, 39): '(0, 2)'}, {}), '((0, 2))', True, 'import numpy as np\n'), ((55, 32, 55, 48), 'numpy.empty', 'np.empty', ({(55, 41, 55, 47): '(0, 2)'}, {}), '((0, 2))', True, 'import numpy as np\n'), ((56, 32, 56, 48), 'numpy.empty', 'np.empty', ({(56, 41, 56, 47): '(0, 2)'}, {}), '((0, 2))', True, 'import numpy as np\n'), ((57, 30, 57, 57), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((58, 25, 58, 58), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((59, 31, 59, 58), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((60, 32, 60, 48), 'numpy.empty', 'np.empty', ({(60, 41, 60, 47): '(0, 4)'}, {}), '((0, 4))', True, 'import numpy as np\n'), ((61, 33, 61, 57), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n')] |
cdacos/astrophysics_with_a_pc | python/helpers.py | b0017856005a4771fbd89c8137fb320b72b1b633 | import sys
def start_parameter(text, i):
if len(sys.argv) > i:
print('{0}{1}'.format(text, sys.argv[i]))
return float(sys.argv[i])
else:
return float(raw_input(text))
| [] |
MarioCarrilloA/stx-packaging | configs/docker-ubuntu-img/para.py | 56cf32c4d65ba20f9317102d922ce946a800527d | #!/usr/bin/python3
# vim:se tw=0 sts=4 ts=4 et ai:
"""
Copyright © 2014 Osamu Aoki
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import os
import pwd
import sys
import time
import debmake.read
###########################################################################
# undefined environment variable -> ''
def env(var):
try:
return os.environ[var]
except KeyError:
return ''
#######################################################################
# Initialize parameters
#######################################################################
def para(para):
debmail = env('DEBEMAIL')
if not debmail:
#debmail = os.getlogin() + '@localhost'
debemail = pwd.getpwuid(os.getuid())[0] + '@localhost'
debfullname = env('DEBFULLNAME')
if not debfullname:
# os.getlogin may not work well: #769392
#debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0]
debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0]
#######################################################################
# command line setting
#######################################################################
p = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description = '''\
{0}: make Debian source package Version: {1}
{2}
{0} helps to build the Debian package from the upstream source.
Normally, this is done as follows:
* The upstream tarball is downloaded as the package-version.tar.gz file.
* It is untared to create many files under the package-version/ directory.
* {0} is invoked in the package-version/ directory possibly without any arguments.
* Files in the package-version/debian/ directory are manually adjusted.
* dpkg-buildpackage (usually from its wrapper debuild or pdebuild) is invoked in the package-version/ directory to make debian packages.
Argument may need to be quoted to protect from the shell.
'''.format(
para['program_name'],
para['program_version'],
para['program_copyright']),
epilog='See debmake(1) manpage for more.')
ck = p.add_mutually_exclusive_group()
ck.add_argument(
'-c',
'--copyright',
action = 'count',
default = 0,
help = 'scan source for copyright+license text and exit')
ck.add_argument(
'-k',
'--kludge',
action = 'count',
default = 0,
help = 'compare debian/copyright with the source and exit')
sp = p.add_mutually_exclusive_group()
sp.add_argument(
'-n',
'--native',
action = 'store_true',
default = False,
help = 'make a native source package without .orig.tar.gz')
sp.add_argument(
'-a',
'--archive',
type = str,
action = 'store',
default = '',
help = 'use the upstream source tarball directly (-p, -u, -z: overridden)',
metavar = 'package-version.tar.gz')
sp.add_argument(
'-d',
'--dist',
action = 'store_true',
default = False,
help = 'run "make dist" equivalent first to generate upstream tarball and use it')
sp.add_argument(
'-t',
'--tar',
action = 'store_true',
default = False,
help = 'run "tar" to generate upstream tarball and use it')
p.add_argument(
'-p',
'--package',
action = 'store',
default = '',
help = 'set the Debian package name',
metavar = 'package')
p.add_argument(
'-u',
'--upstreamversion',
action = 'store',
default = '',
help = 'set the upstream package version',
metavar = 'version')
p.add_argument(
'-r',
'--revision',
action = 'store',
default = '',
help = 'set the Debian package revision',
metavar = 'revision')
p.add_argument(
'-z',
'--targz',
action = 'store',
default = '',
help = 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)',
metavar = 'extension')
p.add_argument(
'-b',
'--binaryspec',
action = 'store',
default = '',
help = 'set binary package specs as comma separated list of "binarypackage":"type" pairs, e.g., in full form "foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev" or in short form ",-doc,libfoo1,libfoo1-dbg, libfoo-dev". Here, "binarypackage" is the binary package name; and optional "type" is chosen from "bin", "data", "dbg", "dev", "doc", "lib", "perl", "python", "python3", "ruby", and "script". If "type" is not specified but obvious, it is set by "binarypackage". Otherwise it is set to "bin" for the compiled ELF binary.',
metavar = 'binarypackage[:type]')
p.add_argument(
'-e',
'--email',
action = 'store',
default = debmail,
help = 'set e-mail address',
metavar = '[email protected]')
p.add_argument(
'-f',
'--fullname',
action = 'store',
default = debfullname,
help = 'set the fullname',
metavar = '"firstname lastname"')
# p.add_argument(
# '-g',
# '--gui',
# action = 'store_true',
# default = False,
# help = 'run GUI configuration')
#
# -h : used by argparse for --help
ep = p.add_mutually_exclusive_group()
ep.add_argument(
'-i',
'--invoke',
default = '',
action = 'store',
help = 'invoke package build tool',
metavar = '[debuild|pdebuild|...]')
ep.add_argument(
'-j',
'--judge',
action = 'store_true',
default = False,
help = 'run "dpkg-depcheck" to judge build dependencies and identify file paths')
p.add_argument(
'-l',
'--license',
default = '',
action = 'store',
help = 'add formatted license to debian/copyright',
metavar = '"license_file"')
p.add_argument(
'-m',
'--monoarch',
action = 'store_true',
default = False,
help = 'force packages to be non-multiarch')
p.add_argument(
'-o',
'--option',
default = '',
action = 'store',
help = 'read optional parameters from "file"',
metavar = '"file"')
p.add_argument(
'-q',
'--quitearly',
action = 'store_true',
default = False,
help='quit early before creating files in the debian directory')
p.add_argument(
'-s',
'--spec',
action = 'store_true',
default = False,
help = 'use upstream spec')
p.add_argument(
'-v',
'--version',
action = 'store_true',
default = False,
help = 'show version information')
p.add_argument(
'-w',
'--with',
action = 'store',
default = '',
dest = 'withargs',
help = 'set additional "dh --with" option arguments',
metavar = 'args')
p.add_argument(
'-x',
'--extra',
default = '',
action = 'store',
help = 'generate extra configuration files as templates',
metavar = '[01234]')
p.add_argument(
'-y',
'--yes',
action = 'count',
default = 0,
help = '"force yes" for all prompts')
p.add_argument(
'-L',
'--local',
action = 'store_true',
default = False,
help='generate configuration files for the local package')
p.add_argument(
'-P',
'--pedantic',
action = 'store_true',
default = False,
help='pedantically check auto-generated files')
p.add_argument(
'-T',
'--tutorial',
action = 'store_true',
default = False,
help='output tutorial comment lines in template files')
args = p.parse_args()
#######################################################################
# Set parameter values
#######################################################################
############################################# -a
if args.archive:
para['archive'] = True
para['tarball'] = args.archive
else:
para['archive'] = False
para['tarball'] = ''
#############################################
para['binaryspec'] = args.binaryspec # -b
para['copyright'] = min(args.copyright, 6) # -c
if para['copyright'] >=4:
para['copyright'] = 3 - para['copyright']
# 0: debian/copyright, +/-1: simple, +/-2: standard +/-3: extensive
para['dist'] = args.dist # -d
para['email'] = args.email # -e
para['fullname'] = args.fullname # -f
# para['gui'] = args.gui # -g
para['invoke'] = args.invoke # -i
para['judge'] = args.judge # -j
if para['judge']:
para['override'].update({'judge'})
para['kludge'] = args.kludge # -k
############################################# -l
# --license: args.license -> para['license'] as set
if args.license == '':
para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*',
'[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default
else:
para['license'] = set(args.copyright.split(','))
#############################################
para['monoarch'] = args.monoarch # -m
para['native'] = args.native # -n
para['package'] = args.package.lower() # -p
#############################################
para['quitearly'] = args.quitearly # -q
para['revision'] = args.revision # -r
para['spec'] = args.spec # -s
para['tar'] = args.tar # -t
para['version'] = args.upstreamversion # -u
para['print_version'] = args.version # -v
############################################# -w
# --with: args.withargs -> para['dh_with'] as set
if args.withargs == '':
para['dh_with'] = set() # default is empty set
else:
para['dh_with'] = set(args.withargs.split(','))
#############################################
para['extra'] = args.extra # -x
para['yes'] = min(args.yes, 2) # -y
# 0: ask, 1: yes, 2: no
para['targz'] = args.targz # -z
para['local'] = args.local # -L
para['pedantic'] = args.pedantic # -P
para['tutorial'] = args.tutorial # -T
############################################# -o
if args.option:
exec(debmake.read.read(args.option))
#######################################################################
# return command line parameters
#######################################################################
return para
#######################################################################
# Test code
#######################################################################
if __name__ == '__main__':
for p, v in para().items():
print("para['{}'] = \"{}\"".format(p,v))
| [((45, 32, 45, 43), 'os.getuid', 'os.getuid', ({}, {}), '()', False, 'import os\n'), ((50, 36, 50, 47), 'os.getuid', 'os.getuid', ({}, {}), '()', False, 'import os\n')] |
lukejamison/jet-dasboard | build/lib/jet_django/views/model.py | 5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9 | from django.core.exceptions import NON_FIELD_ERRORS
from rest_framework import status, viewsets, serializers
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from jet_django.filters.model_aggregate import AggregateFilter
from jet_django.filters.model_group import GroupFilter
from jet_django.pagination import CustomPageNumberPagination
from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo
from jet_django.serializers.reorder import reorder_serializer_factory
class AggregateSerializer(serializers.Serializer):
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
class GroupSerializer(serializers.Serializer):
group = serializers.CharField()
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'group_serializer' in kwargs:
self.fields['group'] = kwargs.pop('group_serializer')
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field):
ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)
class Viewset(viewsets.ModelViewSet):
model = build_model
queryset = build_queryset
pagination_class = CustomPageNumberPagination
filter_class = build_filter_class
authentication_classes = ()
permission_classes = (HasProjectPermissions, ModifyNotInDemo)
def get_serializer_class(self):
if self.action == 'aggregate':
return AggregateSerializer
elif self.action == 'group':
return GroupSerializer
elif self.action == 'retrieve':
return build_detail_serializer_class
else:
return build_serializer_class
@list_route(methods=['get'])
def aggregate(self, request):
queryset = self.filter_queryset(self.get_queryset())
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
y_field = self.model._meta.get_field(y_column)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = AggregateFilter().filter(queryset, {
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
y_func_serializer=y_serializer
)
return Response(serializer.data)
@list_route(methods=['get'])
def group(self, request):
queryset = self.filter_queryset(self.get_queryset())
x_column = request.GET['_x_column']
x_lookup_name = request.GET.get('_x_lookup')
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
x_field = self.model._meta.get_field(x_column)
x_lookup = x_field.class_lookups.get(x_lookup_name)
y_field = self.model._meta.get_field(y_column)
if x_lookup:
x_field = x_lookup('none').output_field
x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field)
x_serializer = x_serializer_class(**x_serializer_kwargs)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = GroupFilter().filter(queryset, {
'x_column': x_column,
'x_lookup': x_lookup,
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
many=True,
group_serializer=x_serializer,
y_func_serializer=y_serializer
)
return Response(serializer.data)
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
@list_route(methods=['post'])
def reorder(self, request):
serializer = ReorderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
@list_route(methods=['post'])
def reset_order(self, request):
i = 1
for instance in build_queryset:
setattr(instance, ordering_field, i)
instance.save()
i += 1
return Response({})
for action in build_actions:
def route(self, request):
form = action(data=request.data)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
queryset = form.filer_queryset(self.get_queryset())
try:
result = form.save(queryset)
except Exception as e:
return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST)
return Response({'action': form._meta.name, 'result': result})
decorator = list_route(methods=['post'])
route = decorator(route)
setattr(Viewset, action._meta.name, route)
return Viewset
| [((15, 13, 15, 39), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ({}, {}), '()', False, 'from rest_framework import status, viewsets, serializers\n'), ((25, 12, 25, 35), 'rest_framework.serializers.CharField', 'serializers.CharField', ({}, {}), '()', False, 'from rest_framework import status, viewsets, serializers\n'), ((26, 13, 26, 39), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ({}, {}), '()', False, 'from rest_framework import status, viewsets, serializers\n'), ((39, 24, 39, 82), 'jet_django.serializers.reorder.reorder_serializer_factory', 'reorder_serializer_factory', ({(39, 51, 39, 65): 'build_queryset', (39, 67, 39, 81): 'ordering_field'}, {}), '(build_queryset, ordering_field)', False, 'from jet_django.serializers.reorder import reorder_serializer_factory\n'), ((59, 9, 59, 36), 'rest_framework.decorators.list_route', 'list_route', (), '', False, 'from rest_framework.decorators import list_route\n'), ((83, 9, 83, 36), 'rest_framework.decorators.list_route', 'list_route', (), '', False, 'from rest_framework.decorators import list_route\n'), ((129, 9, 129, 37), 'rest_framework.decorators.list_route', 'list_route', (), '', False, 'from rest_framework.decorators import list_route\n'), ((136, 9, 136, 37), 'rest_framework.decorators.list_route', 'list_route', (), '', False, 'from rest_framework.decorators import list_route\n'), ((161, 20, 161, 48), 'rest_framework.decorators.list_route', 'list_route', (), '', False, 'from rest_framework.decorators import list_route\n'), ((81, 19, 81, 44), 'rest_framework.response.Response', 'Response', ({(81, 28, 81, 43): 'serializer.data'}, {}), '(serializer.data)', False, 'from rest_framework.response import Response\n'), ((118, 19, 118, 44), 'rest_framework.response.Response', 'Response', ({(118, 28, 118, 43): 'serializer.data'}, {}), '(serializer.data)', False, 'from rest_framework.response import Response\n'), ((134, 19, 134, 44), 'rest_framework.response.Response', 'Response', ({(134, 28, 134, 43): 'serializer.data'}, {}), '(serializer.data)', False, 'from rest_framework.response import Response\n'), ((143, 19, 143, 31), 'rest_framework.response.Response', 'Response', ({(143, 28, 143, 30): '{}'}, {}), '({})', False, 'from rest_framework.response import Response\n'), ((159, 19, 159, 74), 'rest_framework.response.Response', 'Response', ({(159, 28, 159, 73): "{'action': form._meta.name, 'result': result}"}, {}), "({'action': form._meta.name, 'result': result})", False, 'from rest_framework.response import Response\n'), ((150, 23, 150, 80), 'rest_framework.response.Response', 'Response', (), '', False, 'from rest_framework.response import Response\n'), ((68, 54, 68, 71), 'rest_framework.serializers.ModelSerializer', 'ModelSerializer', ({}, {}), '()', False, 'from rest_framework.serializers import ModelSerializer\n'), ((71, 23, 71, 40), 'jet_django.filters.model_aggregate.AggregateFilter', 'AggregateFilter', ({}, {}), '()', False, 'from jet_django.filters.model_aggregate import AggregateFilter\n'), ((99, 54, 99, 71), 'rest_framework.serializers.ModelSerializer', 'ModelSerializer', ({}, {}), '()', False, 'from rest_framework.serializers import ModelSerializer\n'), ((102, 54, 102, 71), 'rest_framework.serializers.ModelSerializer', 'ModelSerializer', ({}, {}), '()', False, 'from rest_framework.serializers import ModelSerializer\n'), ((105, 23, 105, 36), 'jet_django.filters.model_group.GroupFilter', 'GroupFilter', ({}, {}), '()', False, 'from jet_django.filters.model_group import GroupFilter\n')] |
LeGamermc/ursina_tutorials | python_minecraft_tut_2021/weatherCraft.py | f0ad518be3a02cdb52f27c87f2f70817b4d0e8b0 | """
Weather functions.
"""
from ursina import color, window, time
from nMap import nMap
class Weather:
def __init__(this, rate=1):
this.red = 0
this.green = 200
this.blue = 211
this.darkling = 0
this.rate = rate
this.towardsNight = 1
def setSky(this):
r = nMap(this.darkling,0,100,0,this.red)
g = nMap(this.darkling,0,100,0,this.green)
b = nMap(this.darkling,0,100,0,this.blue)
window.color = color.rgb(r,g,b)
def update(this):
this.darkling -= ( this.rate *
this.towardsNight *
time.dt)
if this.darkling < 0:
this.towardsNight *= -1
this.darkling = 0
this.setSky()
| [((20, 12, 20, 48), 'nMap.nMap', 'nMap', ({(20, 17, 20, 30): 'this.darkling', (20, 31, 20, 32): '0', (20, 33, 20, 36): '100', (20, 37, 20, 38): '0', (20, 39, 20, 47): 'this.red'}, {}), '(this.darkling, 0, 100, 0, this.red)', False, 'from nMap import nMap\n'), ((21, 12, 21, 50), 'nMap.nMap', 'nMap', ({(21, 17, 21, 30): 'this.darkling', (21, 31, 21, 32): '0', (21, 33, 21, 36): '100', (21, 37, 21, 38): '0', (21, 39, 21, 49): 'this.green'}, {}), '(this.darkling, 0, 100, 0, this.green)', False, 'from nMap import nMap\n'), ((22, 12, 22, 49), 'nMap.nMap', 'nMap', ({(22, 17, 22, 30): 'this.darkling', (22, 31, 22, 32): '0', (22, 33, 22, 36): '100', (22, 37, 22, 38): '0', (22, 39, 22, 48): 'this.blue'}, {}), '(this.darkling, 0, 100, 0, this.blue)', False, 'from nMap import nMap\n'), ((23, 23, 23, 39), 'ursina.color.rgb', 'color.rgb', ({(23, 33, 23, 34): 'r', (23, 35, 23, 36): 'g', (23, 37, 23, 38): 'b'}, {}), '(r, g, b)', False, 'from ursina import color, window, time\n')] |
davefancella/davenetgame | davenetgame/dispatch/dispatcher.py | f16c36539a3898ab4a021e63feef7fe497e5bc69 | #!/usr/bin/env python3
'''
Copyright 2016 Dave Fancella
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import threading, time
from davenetgame.dispatch.base import DispatcherBase
from davenetgame.protocol import connection
## @file dispatcher
#
# This file contains the standard, generic EventDispatcher class. It's the one you use if
# the library doesn't support your preferred game engine, or if you'd rather manage the library
# independently of your game engine.
## This is the standard EventDispatcher.
class EventDispatcher(DispatcherBase):
pass
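## @par Example
#  A minimal usage sketch, not taken from the library documentation: it assumes that
#  DispatcherBase provides the Start(), Update(), and Stop() methods used elsewhere in
#  this file, and that the game loop supplies its own running flag and timestep value.
#  @code
#  dispatcher = EventDispatcher()          # constructor arguments, if any, omitted here
#  dispatcher.Start()
#  while game_is_running:                  # hypothetical flag owned by the game loop
#      dispatcher.Update(timestep)         # timestep: whatever step value the game uses
#  dispatcher.Stop()
#  @endcode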
## This is a special server-oriented EventDispatcher that provides for an interactive console
# on the server when run in a terminal. This is probably most useful for testing the library,
# though it's not unheard of for a server to run in a terminal and have a console.
class EventDispatcherServer(DispatcherBase):
__console = None
__consolecommands = None
def __init__(self, **args):
super().__init__(**args)
self.__console = ConsoleInput()
self.__consolecommands = []
# Register the standard commands available to every game server.
self.RegisterCommand('show', self.consoleShow, "show (connections)", "Show whatever you want to see.")
self.RegisterCommand('help', self.consoleHelp, "help [command]", "print this helpful text. Alternately, type in a command to see its helpful text.")
self.RegisterCommand('quit', self.consoleQuit, "quit", "Quit the server.")
def Start(self):
self.__console.Start()
super().Start()
def Update(self, timestep):
try:
while self.__console.HasPending():
msg = self.__console.pop()
args = msg.split(" ")
command = args.pop(0)
command = command.lower()
# Ignore simple presses of enter
if command == '':
continue
foundcommand = False
for a in self.__consolecommands:
if a.command() == command:
a.callback(*args)
foundcommand = True
if not foundcommand:
print("Command not recognized: " + command)
except:
pass
super().Update(timestep)
## @name Console API
#
# These methods give access to the built-in server console and the various commands that
# can be created.
#@{
## Console command: show
def consoleShow(self, *args):
if len(args) != 1:
print("Usage: show (connections)")
else:
if args[0] == "connections":
if len(self.GetConnections() ) == 0:
print("There are no connections at this time.")
else:
for a in self.GetConnections():
print("{0:3}: {1:40} {2:10} {3:4}".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) ) )
else:
print("Unknown thing to show: " + args[0])
## Console command: help
def consoleHelp(self, *args):
if len(args) > 0:
for a in self.__consolecommands:
if a.command() == args[0]:
print("%10s : %s" % (args[0], a.helplong() ))
print("%13s %s" % (" ", a.helpshort() ))
print
else:
print("Command not found.")
else:
for a in self.__consolecommands:
print("%10s : %s" % (a.command(), a.helplong() ))
print("%13s %s" % (" ", a.helpshort() ))
print()
## Console command: quit
def consoleQuit(self, *args):
print("Quit signaled from console.")
self.Stop()
self.__console.Stop()
## Call to register console commands with the server. The library implements a number of standard
# commands, but games may need their own commands. In that case, you will need your own callbacks.
def RegisterCommand(self, command, callback, helpshort, helplong):
self.__consolecommands.append(ConsoleCommand(
command = command,
callback = callback,
helpshort = helpshort,
helplong = helplong
)
)
#@}
## This class implements console commands. To create a new console command, simply make an instance of
# this class, giving all the keyword arguments in the constructor.
# @param 'command' : the name of the command, what the user types to use it.
# @param 'callback' : a function that will process the command when the user types it.
# @param 'helpshort' : short help text, usually one line of text, preferably not more than 50 characters.
# In output, it will be prepended with "Usage: "
# @param 'helplong' : long help text, can be as long as needed, as many lines as needed. Do not put
# line endings, however. Those will be added as needed. You may put line endings to
# signify paragraph breaks, if need be.
class ConsoleCommand(object):
__command = None
__callback = None
__helpshort = None
__helplong = None
def __init__(self, **args):
# Ensure the command is always lowercase
self.__command = args['command'].strip().lower()
self.__callback = args['callback']
self.__helpshort = args['helpshort']
self.__helplong = args['helplong']
def callback(self, *args):
self.__callback(*args)
def command(self):
return self.__command
def helpshort(self):
return self.__helpshort
def helplong(self):
return self.__helplong
## This class makes the console input non-blocking.
class ConsoleInput(threading.Thread):
## This is the lock that must be called to avoid thread collisions
__lock = None
## This is a queue of commands, unparsed.
__pcommands = None
def __init__(self, **args):
threading.Thread.__init__(self, **args)
self.__lock = threading.RLock()
self.__pcommands = []
## Call to start the client.
def Start(self):
self.__continue = True
self.start()
## Stops the server. It may still take a few seconds or so. If blocking is "True", then the call will
# block until the server has shut down.
def Stop(self, blocking=False):
self.__continue = False
if blocking:
self.join()
## Returns true if there are pending lines from stdin to work with
def HasPending(self):
if len(self.__pcommands) > 0:
return True
return False
## Starts the console input. Don't call this directly, instead call Start().
def run(self):
while self.__continue:
msg = input(': ')
self.__lock.acquire()
self.__pcommands.append(msg.strip() )
self.__lock.release()
time.sleep(0.01)
## Pops the first item off the commands list and returns it.
def pop(self):
theCommand = None
if len(self.__pcommands) > 0:
self.__lock.acquire()
theCommand = self.__pcommands.pop(0)
self.__lock.release()
return theCommand
| [((185, 8, 185, 47), 'threading.Thread.__init__', 'threading.Thread.__init__', ({(185, 34, 185, 38): 'self'}, {}), '(self, **args)', False, 'import threading, time\n'), ((186, 22, 186, 39), 'threading.RLock', 'threading.RLock', ({}, {}), '()', False, 'import threading, time\n'), ((218, 12, 218, 28), 'time.sleep', 'time.sleep', ({(218, 23, 218, 27): '(0.01)'}, {}), '(0.01)', False, 'import threading, time\n')] |
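# Usage sketch (illustrative, not from the repository above): registering a game-specific
# console command on the EventDispatcherServer defined in dispatcher.py. The 'kick' command
# and its callback are made-up examples; RegisterCommand's signature is taken from the class.
from davenetgame.dispatch.dispatcher import EventDispatcherServer

class MyGameDispatcher(EventDispatcherServer):
    def __init__(self, **args):
        super().__init__(**args)
        self.RegisterCommand('kick', self.consoleKick,
                             "kick <player>", "Disconnect the named player.")

    def consoleKick(self, *args):
        if len(args) != 1:
            print("Usage: kick <player>")
        else:
            print("Kicking " + args[0])   # a real game would act on a connection here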
zenofewords/thebrushstash | account/migrations/0003_customuser_phone_number.py | 7d53bd5f22a2daa1011bb502bce56e735504dc84 | # Generated by Django 2.2.7 on 2019-11-17 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0002_remove_customuser_full_name'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='phone_number',
field=models.CharField(blank=True, max_length=500),
),
]
| [((16, 18, 16, 62), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
gabrieldcpadilha/ListaDeExercicios-PythonBrasil | 03_Estrutura_de_Repeticao/13_potenciacao.py | a92d477468bde5eac8987a26ea79af2ffeb6ad81 | base = int(input('Digite o valor da base: '))
expoente = 0
while expoente <= 0:
expoente = int(input('Digite o valor do expoente: '))
if expoente <= 0:
print('O expoente tem que ser positivo')
potencia = 1
for c in range(1, expoente + 1):
potencia *= base
print(f'{base}^ {expoente} = {potencia}')
| [] |
noahjacob/Accounting | accounting/accounting/doctype/journal_entry/journal_entry.py | 6be90c4f82867156532ca71b1faa9d017e3269af | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Noah Jacob and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt
from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry
class JournalEntry(Document):
def validate(self):
calc_total_debit_credit(self)
if self.difference:
frappe.throw("The total debit and credit must be equal. The current difference is {}".format(self.difference))
if self.total_credit == 0 or self.total_debit == 0 :
frappe.throw('Total Cannot be Zero')
if not self.accounts:
frappe.throw('Account Entries are required')
else:
self.title = self.accounts[0].account
def on_submit(self):
for entry in self.accounts:
make_gl_entry(self,entry.account,entry.debit,entry.credit)
def on_cancel(self):
# cancel gl entry
make_reverse_gl_entry(self,self.doctype,self.name)
def calc_total_debit_credit(self):
self.total_debit, self.total_credit,self.difference = 0,0,0
for entry in self.accounts:
self.total_debit = flt(self.total_debit) +flt(entry.debit)
self.total_credit = flt(self.total_credit) + flt(entry.credit)
self.difference = flt(self.total_debit) - (self.total_credit) | [((35, 2, 35, 52), 'accounting.accounting.general_ledger.make_reverse_gl_entry', 'make_reverse_gl_entry', ({(35, 24, 35, 28): 'self', (35, 29, 35, 41): 'self.doctype', (35, 42, 35, 51): 'self.name'}, {}), '(self, self.doctype, self.name)', False, 'from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry\n'), ((44, 19, 44, 40), 'frappe.utils.flt', 'flt', ({(44, 23, 44, 39): 'self.total_debit'}, {}), '(self.total_debit)', False, 'from frappe.utils import flt\n'), ((18, 3, 18, 39), 'frappe.throw', 'frappe.throw', ({(18, 16, 18, 38): '"""Total Cannot be Zero"""'}, {}), "('Total Cannot be Zero')", False, 'import frappe\n'), ((20, 3, 20, 47), 'frappe.throw', 'frappe.throw', ({(20, 16, 20, 46): '"""Account Entries are required"""'}, {}), "('Account Entries are required')", False, 'import frappe\n'), ((30, 3, 30, 61), 'accounting.accounting.general_ledger.make_gl_entry', 'make_gl_entry', ({(30, 17, 30, 21): 'self', (30, 22, 30, 35): 'entry.account', (30, 36, 30, 47): 'entry.debit', (30, 48, 30, 60): 'entry.credit'}, {}), '(self, entry.account, entry.debit, entry.credit)', False, 'from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry\n'), ((41, 21, 41, 42), 'frappe.utils.flt', 'flt', ({(41, 25, 41, 41): 'self.total_debit'}, {}), '(self.total_debit)', False, 'from frappe.utils import flt\n'), ((41, 44, 41, 60), 'frappe.utils.flt', 'flt', ({(41, 48, 41, 59): 'entry.debit'}, {}), '(entry.debit)', False, 'from frappe.utils import flt\n'), ((42, 22, 42, 44), 'frappe.utils.flt', 'flt', ({(42, 26, 42, 43): 'self.total_credit'}, {}), '(self.total_credit)', False, 'from frappe.utils import flt\n'), ((42, 47, 42, 64), 'frappe.utils.flt', 'flt', ({(42, 51, 42, 63): 'entry.credit'}, {}), '(entry.credit)', False, 'from frappe.utils import flt\n')] |
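# Usage sketch (illustrative, not from the repository above): creating a balanced Journal Entry
# through the Frappe ORM so that validate() above passes. The doctype name and the 'accounts'
# child-table fieldnames are inferred from the class; the account names are placeholders.
import frappe

je = frappe.get_doc({"doctype": "Journal Entry"})
je.append("accounts", {"account": "Cash", "debit": 100, "credit": 0})
je.append("accounts", {"account": "Sales", "debit": 0, "credit": 100})
je.insert()    # runs validate(): total debit equals total credit, so nothing is thrown
je.submit()    # on_submit() writes one GL entry per accounts row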
mmeooo/test_django | polls/models.py | 0364f43549d4082df7100d11c67dd42dc2a82b32 | from django.db import models
# Create your models here.
# Class feature used here: inheritance (each model extends models.Model)
class Question(models.Model): # Table
question_text= models.CharField(max_length= 100) # column, datatype
public_date= models.CharField(max_length= 100)
votes= models.DecimalField(max_digits= 20, decimal_places= 10)
    # Defining the model class with the two field types above is enough
    # link, string -> CharField, numeric data -> DecimalField
    # max_length=100 is the usual choice
class Economics(models.Model):
title= models.CharField(max_length= 100)
href= models.CharField(max_length= 100)
create_date= models.CharField(max_length= 100)
| [((6, 19, 6, 52), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((7, 17, 7, 50), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((8, 11, 8, 66), 'django.db.models.DecimalField', 'models.DecimalField', (), '', False, 'from django.db import models\n'), ((14, 11, 14, 44), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((15, 10, 15, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((16, 17, 16, 50), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n')] |
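# Usage sketch (illustrative, not from the repository above): how the models above are used
# from the Django shell once migrations have been applied; the values are examples only.
from polls.models import Question, Economics

q = Question.objects.create(question_text="Best framework?",
                            public_date="2021-09-01", votes=0)
Economics.objects.create(title="Sample headline",
                         href="https://example.com", create_date="2021-09-01")
print(Question.objects.filter(votes__gte=0).count())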
jack139/HF | ipcam/test_snap.py | 4810f4ee2faf9ab51c867e105addc139da2adfd1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys,os,time
if len(sys.argv)<2:
print "usage: test_snap.py <check|show>"
sys.exit(2)
kam_cmd=sys.argv[1]
path='/var/data2/snap_store'
a=os.listdir(path)
a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera not used in HF system
if kam_cmd=='show' or kam_cmd=='check':
last_sub=int(time.time()/600)
for i in a:
sub='%s/%s' % (path, i)
b=os.listdir(sub)
if 'capture' in b:
b.remove('capture')
b.sort()
sub2='%s/%s' % (sub, b[-1])
c=os.listdir(sub2)
if kam_cmd=='show' or last_sub-int(b[-1])>3:
print "%s - %d, %s - %d, (%d)" % (i, len(b), b[-1], len(c), last_sub-int(b[-1]))
else:
print "usage: test_snap.py <check|show>"
sys.exit(2)
| [] |
M4rukku/impact_of_non_iid_data_in_federated_learning | sources/datasets/client_dataset_definitions/client_dataset.py | c818db03699c82e42217d56f8ddd4cc2081c8bb1 | import functools
import gc
from abc import ABC
from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents
from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor
from sources.utils.exception_definitions import OutsideOfContextError
def throw_error_outside_context(func):
@functools.wraps(func)
def wrapper_decorator(self, *args, **kwargs):
if not self.within_context:
raise OutsideOfContextError(
"""Error: Tried to access client Dataset outside of context
manager. This might lead to data leaks and bad use of
memory. Please wrap the usage of ClientDataset.dataset_x
inside a "with statement". """)
else:
value = func(self, *args, **kwargs)
return value
return wrapper_decorator
class ClientDataset(ABC):
def __init__(self,
client_identifier: str,
client_dataset_loader: ClientDatasetLoader,
client_dataset_processor: ClientDatasetProcessor,
):
self.client_identifier = client_identifier
self.client_dataset_loader = client_dataset_loader
self.client_dataset_processor = client_dataset_processor
self._train_data = None
self._test_data = None
self._validation_data = None
self.within_context = False
def process_x(self, raw_x_batch):
"""Pre-processes each batch of features
before being fed to the model."""
return self.client_dataset_processor.process_x(raw_x_batch)
def process_y(self, raw_y_batch):
"""Pre-processes each batch of labels before being fed to the model."""
return self.client_dataset_processor.process_y(raw_y_batch)
def _lazy_initialise_data(self, data, dataset_component: DatasetComponents):
if data is None:
data = self.client_dataset_loader.load_dataset(self.client_identifier,
dataset_component)
return self.process_x(data["x"]), self.process_y(data["y"])
else:
return data
@property
@throw_error_outside_context
def training_data(self):
"""Returns the Training Data as pair of arrays containing the samples x,
and classification y"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data
@property
@throw_error_outside_context
def training_data_x(self):
"""Returns the Training Data as an array of samples"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data[0]
@property
@throw_error_outside_context
def training_data_y(self):
"""Returns the Classifications for the Training Data as array"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data[1]
@property
@throw_error_outside_context
def test_data(self):
"""Returns the Training Data as pair of arrays containing the samples x,
and classification y"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data
@property
@throw_error_outside_context
def test_data_x(self):
"""Returns the Test Data as an array of samples"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data[0]
@property
@throw_error_outside_context
def test_data_y(self):
"""Returns the Classifications for the Test Data as array"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data[1]
@property
@throw_error_outside_context
def validation_data(self):
"""Returns the Validation Data as pair of arrays containing the
samples x,
and classification y"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data
@property
@throw_error_outside_context
def validation_data_x(self):
"""Returns the Validation Data as an array of samples"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data[0]
@property
@throw_error_outside_context
def validation_data_y(self):
"""Returns the Classifications for the Validation Data as array"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data[1]
def __enter__(self):
self.within_context = True
def __exit__(self, exc_type, exc_value, exc_traceback):
self.within_context = False
self._train_data = None
self._test_data = None
self._validation_data = None
gc.collect()
| [((11, 5, 11, 26), 'functools.wraps', 'functools.wraps', ({(11, 21, 11, 25): 'func'}, {}), '(func)', False, 'import functools\n'), ((144, 8, 144, 20), 'gc.collect', 'gc.collect', ({}, {}), '()', False, 'import gc\n'), ((14, 18, 18, 47), 'sources.utils.exception_definitions.OutsideOfContextError', 'OutsideOfContextError', ({(15, 16, 18, 46): '"""Error: Tried to access client Dataset outside of context \n manager. This might lead to data leaks and bad use of \n memory. Please wrap the usage of ClientDataset.dataset_x \n inside a "with statement". """'}, {}), '(\n """Error: Tried to access client Dataset outside of context \n manager. This might lead to data leaks and bad use of \n memory. Please wrap the usage of ClientDataset.dataset_x \n inside a "with statement". """\n )', False, 'from sources.utils.exception_definitions import OutsideOfContextError\n')] |
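# Usage sketch (illustrative, not from the repository above): a ClientDataset must be used
# inside a "with" block, otherwise its properties raise OutsideOfContextError.
# make_loader()/make_processor() are hypothetical stand-ins for concrete
# ClientDatasetLoader / ClientDatasetProcessor implementations built elsewhere.
dataset = ClientDataset(client_identifier="client_0",
                        client_dataset_loader=make_loader(),          # hypothetical
                        client_dataset_processor=make_processor())    # hypothetical

with dataset:                          # enables access to the lazily loaded splits
    x_train = dataset.training_data_x  # loaded and processed on first access
    y_train = dataset.training_data_y
# on exit the cached splits are dropped and gc.collect() has been called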
mfrigerio17/robot-model-tools | src/rmt/kinematics.py | 97e25d5c4d1386c503d37a70b57400022c5b7ca0 | import logging
import numpy
import kgprim.motions as motions
import kgprim.ct.frommotions as frommotions
import kgprim.ct.repr.mxrepr as mxrepr
import motiondsl.motiondsl as motdsl
logger = logging.getLogger(__name__)
class RobotKinematics:
'''The composition of the constant poses and the joint poses of a robot.
This class is a simple aggregation of the geometry model and the joint-poses
model. By merging the two, this class have access to the full robot
kinematics.
Thanks to gr.motions.ConnectedFramesInspector, an arbitrary relative pose
between two frames on the robot can be obtained.
'''
def __init__(self, geometry, jointPoses):
self.robotGeometry = geometry
self.jointPoses = jointPoses
self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ]
allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel )
self.framesConnectivity = motions.ConnectedFramesInspector(allPoses)
def base_H_ee(kinematics, framename):
if framename not in kinematics.robotGeometry.framesModel.framesByName:
logger.error("Could not find frame '{0}' in model '{1}'".format(framename, kinematics.robotGeometry.robotName))
return None
ee = kinematics.robotGeometry.framesModel.framesByName[ framename ]
if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame):
logger.error("Frame '{0}' and the base frame do not seem to be connected".format(framename))
return None
poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame)
cotr = frommotions.toCoordinateTransform(poseSpec)
H = mxrepr.hCoordinatesSymbolic(cotr)
q = numpy.zeros( len(H.variables) )
H = H.setVariablesValue( valueslist=q )
return H
def serializeToMotionDSLModel(robotKinematics, ostream):
header ='''
Model {modelname}
Convention = currentFrame
'''.format(modelname=robotKinematics.robotGeometry.robotName)
ostream.write(header)
for jp in robotKinematics.jointPoses.poseSpecByJoint.values():
text = motdsl.poseSpecToMotionDSLSnippet( jp )
ostream.write(text)
ostream.write('\n')
for cp in robotKinematics.robotGeometry.byPose.values() :
text = motdsl.poseSpecToMotionDSLSnippet( cp )
ostream.write(text)
ostream.write('\n')
| [((9, 9, 9, 36), 'logging.getLogger', 'logging.getLogger', ({(9, 27, 9, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((43, 11, 43, 54), 'kgprim.ct.frommotions.toCoordinateTransform', 'frommotions.toCoordinateTransform', ({(43, 45, 43, 53): 'poseSpec'}, {}), '(poseSpec)', True, 'import kgprim.ct.frommotions as frommotions\n'), ((44, 8, 44, 41), 'kgprim.ct.repr.mxrepr.hCoordinatesSymbolic', 'mxrepr.hCoordinatesSymbolic', ({(44, 36, 44, 40): 'cotr'}, {}), '(cotr)', True, 'import kgprim.ct.repr.mxrepr as mxrepr\n'), ((28, 34, 28, 76), 'kgprim.motions.ConnectedFramesInspector', 'motions.ConnectedFramesInspector', ({(28, 67, 28, 75): 'allPoses'}, {}), '(allPoses)', True, 'import kgprim.motions as motions\n'), ((60, 15, 60, 54), 'motiondsl.motiondsl.poseSpecToMotionDSLSnippet', 'motdsl.poseSpecToMotionDSLSnippet', ({(60, 50, 60, 52): 'jp'}, {}), '(jp)', True, 'import motiondsl.motiondsl as motdsl\n'), ((64, 15, 64, 54), 'motiondsl.motiondsl.poseSpecToMotionDSLSnippet', 'motdsl.poseSpecToMotionDSLSnippet', ({(64, 50, 64, 52): 'cp'}, {}), '(cp)', True, 'import motiondsl.motiondsl as motdsl\n')] |
atr0s/awx | awx/main/management/commands/run_dispatcher.py | 388ef077c384f4c5296d4870d3b0cf0e6718db80 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import logging
from multiprocessing import Process
from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from kombu import Connection, Exchange, Queue
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
logger = logging.getLogger('awx.main.dispatch')
def construct_bcast_queue_name(common_name):
return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID
class Command(BaseCommand):
help = 'Launch the task dispatcher'
def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true',
help='print the internal state of any running dispatchers')
parser.add_argument('--running', dest='running', action='store_true',
help='print the UUIDs of any tasked managed by this dispatcher')
parser.add_argument('--reload', dest='reload', action='store_true',
help=('cause the dispatcher to recycle all of its worker processes;'
'running jobs will run to completion first'))
def beat(self):
from celery import Celery
from celery.beat import PersistentScheduler
from celery.apps import beat
class AWXScheduler(PersistentScheduler):
def __init__(self, *args, **kwargs):
self.ppid = os.getppid()
super(AWXScheduler, self).__init__(*args, **kwargs)
def setup_schedule(self):
super(AWXScheduler, self).setup_schedule()
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
def tick(self, *args, **kwargs):
if os.getppid() != self.ppid:
# if the parent PID changes, this process has been orphaned
# via e.g., segfault or sigkill, we should exit too
raise SystemExit()
return super(AWXScheduler, self).tick(*args, **kwargs)
def apply_async(self, entry, producer=None, advance=True, **kwargs):
task = TaskWorker.resolve_callable(entry.task)
result, queue = task.apply_async()
class TaskResult(object):
id = result['uuid']
return TaskResult()
app = Celery()
app.conf.BROKER_URL = settings.BROKER_URL
app.conf.CELERY_TASK_RESULT_EXPIRES = False
beat.Beat(
30,
app,
schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler
).run()
def handle(self, *arg, **options):
if options.get('status'):
print Control('dispatcher').status()
return
if options.get('running'):
print Control('dispatcher').running()
return
if options.get('reload'):
return Control('dispatcher').control({'control': 'reload'})
# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race conditions)
django_connection.close()
django_cache.close()
beat = Process(target=self.beat)
beat.daemon = True
beat.start()
reaper.reap()
consumer = None
with Connection(settings.BROKER_URL) as conn:
try:
bcast = 'tower_broadcast_all'
queues = [
Queue(q, Exchange(q), routing_key=q)
for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
]
queues.append(
Queue(
construct_bcast_queue_name(bcast),
exchange=Exchange(bcast, type='fanout'),
routing_key=bcast,
reply=True
)
)
consumer = AWXConsumer(
'dispatcher',
conn,
TaskWorker(),
queues,
AutoscalePool(min_workers=4)
)
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
if consumer:
consumer.stop()
| [] |
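# Usage sketch (illustrative, not from the repository above): the management command above is
# normally run through Django's manage.py, and its flags map onto Control('dispatcher') calls
# that can also be made directly from AWX code.
#
#   python manage.py run_dispatcher             # start the dispatcher and its beat process
#   python manage.py run_dispatcher --status    # print the pool's internal state
#   python manage.py run_dispatcher --reload    # recycle all worker processes
from awx.main.dispatch.control import Control

print(Control('dispatcher').status())           # the same call the --status flag performs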
Nuri-benbarka/PCDet | pcdet/utils/box_coder_utils.py | 8da66ead3bb1120db2fa919187948c8c134e85ae | import numpy as np
import torch
from . import common_utils
class ResidualCoder(object):
def __init__(self, code_size=7):
super().__init__()
self.code_size = code_size
@staticmethod
def encode_np(boxes, anchors):
"""
:param boxes: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
box_ndim = anchors.shape[-1]
xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1)
xg, yg, zg, wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=-1)
# need to convert boxes to z-center format
zg = zg + hg / 2
za = za + ha / 2
diagonal = np.sqrt(la ** 2 + wa ** 2) # 4.3
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha # 1.6
lt = np.log(lg / la)
wt = np.log(wg / wa)
ht = np.log(hg / ha)
rt = rg - ra
cts = [g - a for g, a in zip(cgs, cas)]
return np.concatenate([xt, yt, zt, wt, lt, ht, rt, *cts], axis=-1)
@staticmethod
def decode_np(box_encodings, anchors):
"""
:param box_encodings: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
box_ndim = anchors.shape[-1]
xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1)
# need to convert box_encodings to z-bottom format
za = za + ha / 2
diagonal = np.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = np.exp(lt) * la
wg = np.exp(wt) * wa
hg = np.exp(ht) * ha
rg = rt + ra
zg = zg - hg / 2
cgs = [t + a for t, a in zip(cts, cas)]
return np.concatenate([xg, yg, zg, wg, lg, hg, rg, *cgs], axis=-1)
@staticmethod
def encode_torch(boxes, anchors):
"""
:param boxes: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1)
za = za + ha / 2
zg = zg + hg / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha
lt = torch.log(lg / la)
wt = torch.log(wg / wa)
ht = torch.log(hg / ha)
rt = rg - ra
cts = [g - a for g, a in zip(cgs, cas)]
return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1)
@staticmethod
def decode_torch(box_encodings, anchors):
"""
:param box_encodings: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
za = za + ha / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = torch.exp(lt) * la
wg = torch.exp(wt) * wa
hg = torch.exp(ht) * ha
rg = rt + ra
zg = zg - hg / 2
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1)
def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds,
num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False):
"""
:param box_preds: (batch_size, N, 7 + ?), x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (batch_size, N, 7 + ?), x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2)
:return:
"""
batch_box_preds = self.decode_torch(box_preds, anchors)
if dir_cls_preds is not None:
dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1)
if use_binary_dir_classifier:
dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
opp_labels = (batch_box_preds[..., -1] > 0) ^ dir_labels.byte()
batch_box_preds[..., -1] += torch.where(
opp_labels,
torch.tensor(np.pi).type_as(batch_box_preds),
torch.tensor(0.0).type_as(batch_box_preds)
)
else:
dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
period = (2 * np.pi / num_dir_bins)
dir_rot = common_utils.limit_period_torch(
batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period
)
batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)
return batch_box_preds
if __name__ == '__main__':
pass
| [((19, 43, 19, 79), 'numpy.split', 'np.split', (), '', True, 'import numpy as np\n'), ((20, 43, 20, 77), 'numpy.split', 'np.split', (), '', True, 'import numpy as np\n'), ((26, 19, 26, 45), 'numpy.sqrt', 'np.sqrt', ({(26, 27, 26, 44): 'la ** 2 + wa ** 2'}, {}), '(la ** 2 + wa ** 2)', True, 'import numpy as np\n'), ((30, 13, 30, 28), 'numpy.log', 'np.log', ({(30, 20, 30, 27): 'lg / la'}, {}), '(lg / la)', True, 'import numpy as np\n'), ((31, 13, 31, 28), 'numpy.log', 'np.log', ({(31, 20, 31, 27): 'wg / wa'}, {}), '(wg / wa)', True, 'import numpy as np\n'), ((32, 13, 32, 28), 'numpy.log', 'np.log', ({(32, 20, 32, 27): 'hg / ha'}, {}), '(hg / ha)', True, 'import numpy as np\n'), ((35, 15, 35, 74), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((45, 43, 45, 79), 'numpy.split', 'np.split', (), '', True, 'import numpy as np\n'), ((46, 43, 46, 85), 'numpy.split', 'np.split', (), '', True, 'import numpy as np\n'), ((51, 19, 51, 45), 'numpy.sqrt', 'np.sqrt', ({(51, 27, 51, 44): 'la ** 2 + wa ** 2'}, {}), '(la ** 2 + wa ** 2)', True, 'import numpy as np\n'), ((62, 15, 62, 74), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((71, 43, 71, 74), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((72, 43, 72, 72), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((77, 19, 77, 48), 'torch.sqrt', 'torch.sqrt', ({(77, 30, 77, 47): 'la ** 2 + wa ** 2'}, {}), '(la ** 2 + wa ** 2)', False, 'import torch\n'), ((81, 13, 81, 31), 'torch.log', 'torch.log', ({(81, 23, 81, 30): 'lg / la'}, {}), '(lg / la)', False, 'import torch\n'), ((82, 13, 82, 31), 'torch.log', 'torch.log', ({(82, 23, 82, 30): 'wg / wa'}, {}), '(wg / wa)', False, 'import torch\n'), ((83, 13, 83, 31), 'torch.log', 'torch.log', ({(83, 23, 83, 30): 'hg / ha'}, {}), '(hg / ha)', False, 'import torch\n'), ((87, 15, 87, 68), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((96, 43, 96, 74), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((97, 43, 97, 80), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((101, 19, 101, 48), 'torch.sqrt', 'torch.sqrt', ({(101, 30, 101, 47): 'la ** 2 + wa ** 2'}, {}), '(la ** 2 + wa ** 2)', False, 'import torch\n'), ((113, 15, 113, 68), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((56, 13, 56, 23), 'numpy.exp', 'np.exp', ({(56, 20, 56, 22): 'lt'}, {}), '(lt)', True, 'import numpy as np\n'), ((57, 13, 57, 23), 'numpy.exp', 'np.exp', ({(57, 20, 57, 22): 'wt'}, {}), '(wt)', True, 'import numpy as np\n'), ((58, 13, 58, 23), 'numpy.exp', 'np.exp', ({(58, 20, 58, 22): 'ht'}, {}), '(ht)', True, 'import numpy as np\n'), ((106, 13, 106, 26), 'torch.exp', 'torch.exp', ({(106, 23, 106, 25): 'lt'}, {}), '(lt)', False, 'import torch\n'), ((107, 13, 107, 26), 'torch.exp', 'torch.exp', ({(107, 23, 107, 25): 'wt'}, {}), '(wt)', False, 'import torch\n'), ((108, 13, 108, 26), 'torch.exp', 'torch.exp', ({(108, 23, 108, 25): 'ht'}, {}), '(ht)', False, 'import torch\n'), ((128, 29, 128, 61), 'torch.max', 'torch.max', (), '', False, 'import torch\n'), ((136, 29, 136, 61), 'torch.max', 'torch.max', (), '', False, 'import torch\n'), ((132, 20, 132, 39), 'torch.tensor', 'torch.tensor', ({(132, 33, 132, 38): 'np.pi'}, {}), '(np.pi)', False, 'import torch\n'), ((133, 20, 133, 37), 'torch.tensor', 'torch.tensor', ({(133, 33, 133, 36): '(0.0)'}, {}), '(0.0)', False, 'import torch\n')] |
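# Usage sketch (illustrative, not from the repository above): round-tripping a single 7-DoF box
# (x, y, z, w, l, h, r) through the numpy coder above; the anchor/box values are arbitrary.
import numpy as np

anchor = np.array([[0.0, 0.0, -1.0, 1.6, 3.9, 1.56, 0.0]])
gt_box = np.array([[0.5, 1.0, -0.8, 1.7, 4.2, 1.50, 0.3]])

encoding = ResidualCoder.encode_np(gt_box, anchor)
recovered = ResidualCoder.decode_np(encoding, anchor)
assert np.allclose(recovered, gt_box)   # decode undoes encode up to floating-point error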
jainajinkya/deep_bingham | utils/utils.py | 2ea85b3ea2af579eab36567091b88a1bbf4a627b | """ Utilities for learning pipeline."""
from __future__ import print_function
import copy
import dill
import hashlib
import itertools
import third_party.deep_bingham.bingham_distribution as ms
import math
import numpy as np
import os
import scipy
import scipy.integrate as integrate
import scipy.special
import sys
import torch
from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import cpu_count
def convert_euler_to_quaternion(roll, yaw, pitch):
"""Converts roll, yaw, pitch to a quaternion.
"""
# roll (z), yaw (y), pitch (x)
cy = math.cos(math.radians(roll) * 0.5)
sy = math.sin(math.radians(roll) * 0.5)
cp = math.cos(math.radians(yaw) * 0.5)
sp = math.sin(math.radians(yaw) * 0.5)
cr = math.cos(math.radians(pitch) * 0.5)
sr = math.sin(math.radians(pitch) * 0.5)
w = cy * cp * cr + sy * sp * sr
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
quat = np.array([w, x, y, z])
quat = quat / np.linalg.norm(quat)
return quat
def radians(degree_tensor):
"""
Method to convert a torch tensor of angles in degree format to radians.
Arguments:
degree_tensor (torch.Tensor): Tensor consisting of angles in degree format.
Returns:
radian_tensor (torch.Tensor): Tensor consisting of angles in radian format.
"""
radian_tensor = degree_tensor/180 * math.pi
return radian_tensor
def generate_coordinates(coords):
"""
A function that returns all possible triples of coords
Parameters:
coords: a numpy array of coordinates
Returns:
x: the first coordinate of possible triples
y: the second coordinate of possible triples
z the third coordinate of possible triples
"""
x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()
y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))
z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords))
return x, y, z
def ensure_dir_exists(path):
""" Checks if a directory exists and creates it otherwise. """
if not os.path.exists(path):
os.makedirs(path)
def load_lookup_table(path):
"""
Loads lookup table from dill serialized file.
Returns a table specific tuple. For the Bingham case, the tuple containins:
table_type (str):
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
coords (numpy.ndarray): Coordinates at which lookup table was evaluated.
For the von Mises case, it contains:
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
"""
assert os.path.exists(path), "Lookup table file not found."
with open(path, "rb") as dillfile:
return dill.load(dillfile)
def eaad_von_mises(kappas, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
kappas: Von Mises kappa parameters for roll, pitch, yaw.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2.0 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-2, "epsabs": 1e-2}
param_mu = np.array([0., 0., 0.]) # radians
quat_mu = convert_euler_to_quaternion(
math.degrees(param_mu[0]), math.degrees(param_mu[1]),
math.degrees(param_mu[2])
)
param_kappa = kappas
direct_norm_const = 8.0 * (np.pi ** 3) \
* scipy.special.iv(0, param_kappa[0]) \
* scipy.special.iv(0, param_kappa[1]) \
* scipy.special.iv(0, param_kappa[2])
def integrand_aad(phi1, phi2, phi3):
return np.exp(param_kappa[0] * np.cos(phi1)) \
* np.exp(param_kappa[1] * np.cos(phi2)) \
* np.exp(param_kappa[2] * np.cos(phi3)) \
* aad(quat_mu,
convert_euler_to_quaternion(
math.degrees(phi1), math.degrees(phi2),
math.degrees(phi3)
))
eaad_int = integrate.tplquad(
integrand_aad,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: 2. * np.pi, # phi2
lambda x, y: 0.0, lambda x, y: 2. * np.pi, # phi1
**integral_options
)
return eaad_int[0]/direct_norm_const
def eaad_bingham(bingham_z, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
bingham_z: Bingham dispersion parameter in the format expected by the
manstats BinghamDistribution class.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
# acos_val = np.arccos(np.dot(quat_a, quat_b))
# diff_ang = 2 * np.min([acos_val, np.pi - acos_val])
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-4, "epsabs": 1e-4}
bd = ms.BinghamDistribution(
np.eye(4), bingham_z,
{"norm_const_mode": "numerical",
"norm_const_options": integral_options}
)
def integrand_transformed(x):
# To avoid unnecessary divisions, this term does not contain the
# normalization constant. At the end, the result of the integration is
# divided by it.
return aad(x, bd.mode) \
* np.exp(np.dot(x, np.dot(np.diag(bingham_z), x)))
def integrand(phi1, phi2, phi3):
sp1 = np.sin(phi1)
sp2 = np.sin(phi2)
return integrand_transformed(np.array([
sp1 * sp2 * np.sin(phi3),
sp1 * sp2 * np.cos(phi3),
sp1 * np.cos(phi2),
np.cos(phi1)
])) * (sp1 ** 2.) * sp2
eaad_int = integrate.tplquad(
integrand,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: np.pi, # phi2
lambda x, y: 0.0, lambda x, y: np.pi, # phi1
**integral_options
)
return eaad_int[0] / bd.norm_const
def build_bd_lookup_table(table_type, options, path=None):
"""
Builds a lookup table for interpolating the bingham normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
table_type: Type of lookup table used. May be 'uniform' or 'nonuniform'
options: Dict cotaining type specific options.
If type is "uniform" this dict must contain:
"bounds" = Tuple (lower_bound, upper_bound) representing bounds.
"num_points" = Number of points per dimension.
If type is "nonuniform" this dict must contain a key "coords" which
is a numpy arrays representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
hash_obj.update(table_type.encode('utf-8'))
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_type, serialized_options, res_table, coords) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
hash_obj.update(serialized_type)
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
elif table_type == "uniform":
# Number of points per axis.
(lbound, rbound) = options["bounds"]
num_points = options["num_points"]
assert num_points > 1, \
"Grid must have more than one point per dimension."
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = np.linspace(lbound, rbound, num_points)
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
elif table_type == "nonuniform":
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = options["coords"]
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
else:
sys.exit("Unknown lookup table type")
return res_table
def build_vm_lookup_table(options, path=None):
"""
Builds a lookup table for interpolating the bingham normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
options: Dict cotaining table options. It must contain a key "coords"
which is a numpy arrays representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_options, res_table) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
else:
coords = options["coords"]
res_table = _compute_vm_lookup_table(coords)
with open(path, "wb") as dillfile:
dill.dump((options, res_table), dillfile)
return res_table
def _compute_bd_lookup_table(coords, nc_options):
num_points = len(coords)
pool = Pool(max(cpu_count()//2, 1))
def nc_wrapper(idx):
pt_idx = point_indices[idx]
# Indexing pt_idx in the order 2,1,0 vs. 0,1,2 has no impact
# on the result as the Bingham normalization constant is agnostic to it.
# However, the numpy integration that is used to compute it, combines
# numerical 2d and 1d integration which is why the order matters for the
# actual computation time.
#
# TODO: Make pymanstats choose best order automatically.
norm_const = ms.BinghamDistribution.normalization_constant(
np.array(
[coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]),
"numerical", nc_options)
print("Computing NC for Z=[{}, {}, {}, 0.0]: {}".format(
coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]],
norm_const))
return norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
class AverageMeter(object):
"""Computes and stores the averages over a numbers or dicts of numbers.
For the dict, this class assumes that no new keys are added during
the computation.
"""
def __init__(self):
self.last_val = 0
self.avg = 0
self.count = 0
def update(self, val, n=1):
self.last_val = val
n = float(n)
if type(val) == dict:
if self.count == 0:
self.avg = copy.deepcopy(val)
else:
for key in val:
self.avg[key] *= self.count / (self.count + n)
self.avg[key] += val[key] * n / (self.count + n)
else:
self.avg *= self.count / (self.count + n)
self.avg += val * n / (self.count + n)
self.count += n
self.last_val = val
def _compute_vm_lookup_table(coords):
num_points = len(coords)
pool = Pool()
def nc_wrapper(idx):
cur_pt_idx = point_indices[idx]
log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[2]]))
print("Computing NC for kappas=[{}, {}, {}]: {}".format(
coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]],
log_norm_const))
return log_norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
def vec_to_bingham_z_many(y):
z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0)
return z
def vec_to_bingham_z(y):
z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0)
if not all(z[0][:-1] <= z[0][1:]):
print(z)
return z
| [((40, 11, 40, 33), 'numpy.array', 'np.array', ({(40, 20, 40, 32): '[w, x, y, z]'}, {}), '([w, x, y, z])', True, 'import numpy as np\n'), ((98, 11, 98, 31), 'os.path.exists', 'os.path.exists', ({(98, 26, 98, 30): 'path'}, {}), '(path)', False, 'import os\n'), ((119, 15, 119, 37), 'numpy.array', 'np.array', ({(119, 24, 119, 36): '[0.0, 0.0, 0.0]'}, {}), '([0.0, 0.0, 0.0])', True, 'import numpy as np\n'), ((141, 15, 147, 5), 'scipy.integrate.tplquad', 'integrate.tplquad', ({(142, 8, 142, 21): 'integrand_aad', (143, 8, 143, 11): '0.0', (143, 13, 143, 24): '2.0 * np.pi', (144, 8, 144, 21): 'lambda x: 0.0', (144, 23, 144, 44): 'lambda x: 2.0 * np.pi', (145, 8, 145, 24): 'lambda x, y: 0.0', (145, 26, 145, 49): 'lambda x, y: 2.0 * np.pi'}, {}), '(integrand_aad, 0.0, 2.0 * np.pi, lambda x: 0.0, lambda x:\n 2.0 * np.pi, lambda x, y: 0.0, lambda x, y: 2.0 * np.pi, **integral_options\n )', True, 'import scipy.integrate as integrate\n'), ((195, 15, 201, 5), 'scipy.integrate.tplquad', 'integrate.tplquad', ({(196, 8, 196, 17): 'integrand', (197, 8, 197, 11): '0.0', (197, 13, 197, 24): '2.0 * np.pi', (198, 8, 198, 21): 'lambda x: 0.0', (198, 23, 198, 38): 'lambda x: np.pi', (199, 8, 199, 24): 'lambda x, y: 0.0', (199, 26, 199, 44): 'lambda x, y: np.pi'}, {}), '(integrand, 0.0, 2.0 * np.pi, lambda x: 0.0, lambda x: np.\n pi, lambda x, y: 0.0, lambda x, y: np.pi, **integral_options)', True, 'import scipy.integrate as integrate\n'), ((225, 15, 225, 31), 'hashlib.sha256', 'hashlib.sha256', ({}, {}), '()', False, 'import hashlib\n'), ((235, 7, 235, 27), 'os.path.exists', 'os.path.exists', ({(235, 22, 235, 26): 'path'}, {}), '(path)', False, 'import os\n'), ((293, 15, 293, 31), 'hashlib.sha256', 'hashlib.sha256', ({}, {}), '()', False, 'import hashlib\n'), ((302, 7, 302, 27), 'os.path.exists', 'os.path.exists', ({(302, 22, 302, 26): 'path'}, {}), '(path)', False, 'import os\n'), ((395, 11, 395, 17), 'pathos.multiprocessing.ProcessingPool', 'Pool', ({}, {}), '()', True, 'from pathos.multiprocessing import ProcessingPool as Pool\n'), ((41, 18, 41, 38), 'numpy.linalg.norm', 'np.linalg.norm', ({(41, 33, 41, 37): 'quat'}, {}), '(quat)', True, 'import numpy as np\n'), ((80, 11, 80, 31), 'os.path.exists', 'os.path.exists', ({(80, 26, 80, 30): 'path'}, {}), '(path)', False, 'import os\n'), ((81, 8, 81, 25), 'os.makedirs', 'os.makedirs', ({(81, 20, 81, 24): 'path'}, {}), '(path)', False, 'import os\n'), ((100, 15, 100, 34), 'dill.load', 'dill.load', ({(100, 25, 100, 33): 'dillfile'}, {}), '(dillfile)', False, 'import dill\n'), ((121, 8, 121, 33), 'math.degrees', 'math.degrees', ({(121, 21, 121, 32): 'param_mu[0]'}, {}), '(param_mu[0])', False, 'import math\n'), ((121, 35, 121, 60), 'math.degrees', 'math.degrees', ({(121, 48, 121, 59): 'param_mu[1]'}, {}), '(param_mu[1])', False, 'import math\n'), ((122, 8, 122, 33), 'math.degrees', 'math.degrees', ({(122, 21, 122, 32): 'param_mu[2]'}, {}), '(param_mu[2])', False, 'import math\n'), ((129, 10, 129, 45), 'scipy.special.iv', 'scipy.special.iv', ({(129, 27, 129, 28): '(0)', (129, 30, 129, 44): 'param_kappa[2]'}, {}), '(0, param_kappa[2])', False, 'import scipy\n'), ((173, 8, 173, 17), 'numpy.eye', 'np.eye', ({(173, 15, 173, 16): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((186, 14, 186, 26), 'numpy.sin', 'np.sin', ({(186, 21, 186, 25): 'phi1'}, {}), '(phi1)', True, 'import numpy as np\n'), ((187, 14, 187, 26), 'numpy.sin', 'np.sin', ({(187, 21, 187, 25): 'phi2'}, {}), '(phi2)', True, 'import numpy as np\n'), ((227, 20, 227, 39), 'dill.dumps', 'dill.dumps', ({(227, 
31, 227, 38): 'options'}, {}), '(options)', False, 'import dill\n'), ((294, 20, 294, 39), 'dill.dumps', 'dill.dumps', ({(294, 31, 294, 38): 'options'}, {}), '(options)', False, 'import dill\n'), ((350, 18, 350, 63), 'numpy.ones', 'np.ones', ({(350, 26, 350, 62): '(num_points, num_points, num_points)'}, {}), '((num_points, num_points, num_points))', True, 'import numpy as np\n'), ((414, 18, 414, 63), 'numpy.ones', 'np.ones', ({(414, 26, 414, 62): '(num_points, num_points, num_points)'}, {}), '((num_points, num_points, num_points))', True, 'import numpy as np\n'), ((26, 18, 26, 36), 'math.radians', 'math.radians', ({(26, 31, 26, 35): 'roll'}, {}), '(roll)', False, 'import math\n'), ((27, 18, 27, 36), 'math.radians', 'math.radians', ({(27, 31, 27, 35): 'roll'}, {}), '(roll)', False, 'import math\n'), ((29, 18, 29, 35), 'math.radians', 'math.radians', ({(29, 31, 29, 34): 'yaw'}, {}), '(yaw)', False, 'import math\n'), ((30, 18, 30, 35), 'math.radians', 'math.radians', ({(30, 31, 30, 34): 'yaw'}, {}), '(yaw)', False, 'import math\n'), ((32, 18, 32, 37), 'math.radians', 'math.radians', ({(32, 31, 32, 36): 'pitch'}, {}), '(pitch)', False, 'import math\n'), ((33, 18, 33, 37), 'math.radians', 'math.radians', ({(33, 31, 33, 36): 'pitch'}, {}), '(pitch)', False, 'import math\n'), ((128, 10, 128, 45), 'scipy.special.iv', 'scipy.special.iv', ({(128, 27, 128, 28): '(0)', (128, 30, 128, 44): 'param_kappa[1]'}, {}), '(0, param_kappa[1])', False, 'import scipy\n'), ((231, 15, 231, 40), 'os.path.dirname', 'os.path.dirname', ({(231, 31, 231, 39): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((238, 18, 238, 37), 'dill.load', 'dill.load', ({(238, 28, 238, 36): 'dillfile'}, {}), '(dillfile)', False, 'import dill\n'), ((239, 23, 239, 39), 'hashlib.sha256', 'hashlib.sha256', ({}, {}), '()', False, 'import hashlib\n'), ((256, 17, 256, 56), 'numpy.linspace', 'np.linspace', ({(256, 29, 256, 35): 'lbound', (256, 37, 256, 43): 'rbound', (256, 45, 256, 55): 'num_points'}, {}), '(lbound, rbound, num_points)', True, 'import numpy as np\n'), ((298, 15, 298, 40), 'os.path.dirname', 'os.path.dirname', ({(298, 31, 298, 39): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((305, 18, 305, 37), 'dill.load', 'dill.load', ({(305, 28, 305, 36): 'dillfile'}, {}), '(dillfile)', False, 'import dill\n'), ((306, 23, 306, 39), 'hashlib.sha256', 'hashlib.sha256', ({}, {}), '()', False, 'import hashlib\n'), ((317, 12, 317, 53), 'dill.dump', 'dill.dump', ({(317, 22, 317, 42): '(options, res_table)', (317, 44, 317, 52): 'dillfile'}, {}), '((options, res_table), dillfile)', False, 'import dill\n'), ((338, 12, 339, 78), 'numpy.array', 'np.array', ({(339, 16, 339, 77): '[coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.0]'}, {}), '([coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.0])', True, 'import numpy as np\n'), ((112, 36, 112, 58), 'numpy.dot', 'np.dot', ({(112, 43, 112, 49): 'quat_a', (112, 51, 112, 57): 'quat_b'}, {}), '(quat_a, quat_b)', True, 'import numpy as np\n'), ((127, 10, 127, 45), 'scipy.special.iv', 'scipy.special.iv', ({(127, 27, 127, 28): '(0)', (127, 30, 127, 44): 'param_kappa[0]'}, {}), '(0, param_kappa[0])', False, 'import scipy\n'), ((165, 36, 165, 58), 'numpy.dot', 'np.dot', ({(165, 43, 165, 49): 'quat_a', (165, 51, 165, 57): 'quat_b'}, {}), '(quat_a, quat_b)', True, 'import numpy as np\n'), ((241, 28, 241, 58), 'dill.dumps', 'dill.dumps', ({(241, 39, 241, 57): 'serialized_options'}, {}), '(serialized_options)', False, 'import dill\n'), ((261, 12, 261, 73), 'dill.dump', 'dill.dump', 
({(261, 22, 261, 62): '(table_type, options, res_table, coords)', (261, 64, 261, 72): 'dillfile'}, {}), '((table_type, options, res_table, coords), dillfile)', False, 'import dill\n'), ((274, 8, 274, 45), 'sys.exit', 'sys.exit', ({(274, 17, 274, 44): '"""Unknown lookup table type"""'}, {}), "('Unknown lookup table type')", False, 'import sys\n'), ((307, 28, 307, 58), 'dill.dumps', 'dill.dumps', ({(307, 39, 307, 57): 'serialized_options'}, {}), '(serialized_options)', False, 'import dill\n'), ((325, 20, 325, 31), 'pathos.multiprocessing.cpu_count', 'cpu_count', ({}, {}), '()', False, 'from pathos.multiprocessing import cpu_count\n'), ((379, 27, 379, 45), 'copy.deepcopy', 'copy.deepcopy', ({(379, 41, 379, 44): 'val'}, {}), '(val)', False, 'import copy\n'), ((403, 21, 403, 63), 'scipy.special.iv', 'scipy.special.iv', ({(403, 38, 403, 39): '(0)', (403, 41, 403, 62): 'coords[cur_pt_idx[2]]'}, {}), '(0, coords[cur_pt_idx[2]])', False, 'import scipy\n'), ((137, 25, 137, 43), 'math.degrees', 'math.degrees', ({(137, 38, 137, 42): 'phi1'}, {}), '(phi1)', False, 'import math\n'), ((137, 45, 137, 63), 'math.degrees', 'math.degrees', ({(137, 58, 137, 62): 'phi2'}, {}), '(phi2)', False, 'import math\n'), ((138, 25, 138, 43), 'math.degrees', 'math.degrees', ({(138, 38, 138, 42): 'phi3'}, {}), '(phi3)', False, 'import math\n'), ((271, 12, 271, 73), 'dill.dump', 'dill.dump', ({(271, 22, 271, 62): '(table_type, options, res_table, coords)', (271, 64, 271, 72): 'dillfile'}, {}), '((table_type, options, res_table, coords), dillfile)', False, 'import dill\n'), ((402, 21, 402, 63), 'scipy.special.iv', 'scipy.special.iv', ({(402, 38, 402, 39): '(0)', (402, 41, 402, 62): 'coords[cur_pt_idx[1]]'}, {}), '(0, coords[cur_pt_idx[1]])', False, 'import scipy\n'), ((134, 41, 134, 53), 'numpy.cos', 'np.cos', ({(134, 48, 134, 52): 'phi3'}, {}), '(phi3)', True, 'import numpy as np\n'), ((183, 41, 183, 59), 'numpy.diag', 'np.diag', ({(183, 49, 183, 58): 'bingham_z'}, {}), '(bingham_z)', True, 'import numpy as np\n'), ((400, 25, 400, 36), 'numpy.log', 'np.log', ({(400, 32, 400, 35): '(8.0)'}, {}), '(8.0)', True, 'import numpy as np\n'), ((401, 21, 401, 63), 'scipy.special.iv', 'scipy.special.iv', ({(401, 38, 401, 39): '(0)', (401, 41, 401, 62): 'coords[cur_pt_idx[0]]'}, {}), '(0, coords[cur_pt_idx[0]])', False, 'import scipy\n'), ((132, 39, 132, 51), 'numpy.cos', 'np.cos', ({(132, 46, 132, 50): 'phi1'}, {}), '(phi1)', True, 'import numpy as np\n'), ((133, 41, 133, 53), 'numpy.cos', 'np.cos', ({(133, 48, 133, 52): 'phi2'}, {}), '(phi2)', True, 'import numpy as np\n'), ((192, 12, 192, 24), 'numpy.cos', 'np.cos', ({(192, 19, 192, 23): 'phi1'}, {}), '(phi1)', True, 'import numpy as np\n'), ((400, 45, 400, 58), 'numpy.log', 'np.log', ({(400, 52, 400, 57): 'np.pi'}, {}), '(np.pi)', True, 'import numpy as np\n'), ((427, 9, 427, 21), 'torch.exp', 'torch.exp', ({(427, 19, 427, 20): 'y'}, {}), '(y)', False, 'import torch\n'), ((432, 9, 432, 21), 'torch.exp', 'torch.exp', ({(432, 19, 432, 20): 'y'}, {}), '(y)', False, 'import torch\n'), ((189, 24, 189, 36), 'numpy.sin', 'np.sin', ({(189, 31, 189, 35): 'phi3'}, {}), '(phi3)', True, 'import numpy as np\n'), ((190, 24, 190, 36), 'numpy.cos', 'np.cos', ({(190, 31, 190, 35): 'phi3'}, {}), '(phi3)', True, 'import numpy as np\n'), ((191, 18, 191, 30), 'numpy.cos', 'np.cos', ({(191, 25, 191, 29): 'phi2'}, {}), '(phi2)', True, 'import numpy as np\n')] |
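# Usage sketch (illustrative, not from the repository above): two of the helpers above in
# isolation: the Euler-to-quaternion conversion (inputs in degrees, output normalized) and
# the AverageMeter running average over dicts; all numbers are arbitrary examples.
import numpy as np

q = convert_euler_to_quaternion(roll=10.0, yaw=20.0, pitch=30.0)
print(np.linalg.norm(q))                 # ~1.0, the result is normalized

meter = AverageMeter()
meter.update({"loss": 0.8, "eaad": 1.2})
meter.update({"loss": 0.4, "eaad": 1.0})
print(meter.avg)                         # {'loss': 0.6..., 'eaad': 1.1...}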
obatsis/Distributed-NTUA | cli_ui.py | 0bf39163b64aaefb2576be01337e0ec6e026ce6d | import requests
import os
from PyInquirer import style_from_dict, Token, prompt
import sys
import utils.config as config
import utils.ends as ends
from utils.colorfy import *
from auto.testing import test_trans
import time
import json
style = style_from_dict({
Token.QuestionMark: '#E91E63 bold',
Token.Selected: '#673AB7 bold',
Token.Instruction: '#0bf416',
Token.Answer: '#2196f3 bold',
Token.Question: '#0bf416 bold',
})
def client(ip, port):
os.system('clear')
cyan('What a beautiful day to enter the cult...')
baseURL = 'http://' + ip + ':' + port
while True:
print('----------------------------------------------------------------------')
method_q = {
'type': 'list',
'name': 'method',
'message': 'Select action:',
'choices': ['Network Overlay', \
'Insert a Song', \
'Search for a Song', \
'Delete a Song', \
'Depart from Chord', \
'Run automated test', \
'Help', \
'Exit']
}
method_a = prompt(method_q, style=style)['method']
os.system('clear')
if method_a == 'Depart from Chord':
print(cyan("Preparing Node to depart from Chord..."))
try:
response = requests.get(baseURL + ends.c_depart)
if response.status_code == 200:
if response.text == "Left the Chord":
print(response.text)
print(green("Node is out of Toychord network"))
else:
print(red(response.text))
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node. Node didnt depart..."))
print(red("Unfortunately exiting..."))
break
elif method_a == 'Insert a Song':
print('Insert a Title-Value pair for the song you wish to insert')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
},
{
'type': 'input',
'name': 'value',
'message': 'Value:',
'filter': lambda val: str(val)
}
]
fetch_a = prompt(fetch_q, style=style)
print(cyan("Inserting Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']})
if response.status_code == 200:
print(cyan("Inserted by node with id: ") + green(response.text.split(" ")[0]))
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node. Song wasnt inserted..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Delete a Song':
print('Insert the Song Title you wish to delete')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
}]
fetch_a = prompt(fetch_q, style=style)
print(cyan("Deleting Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']})
if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
# print(cyan("Deleting Song: ") + green(response.text.split(" ")[1]) + )
print(cyan("Deleted by node with id: ") + green(response.text.split(" ")[0]))
else :
print(yellow("Song doesnt exist in the Chord"))
print(yellow("Couldnt delete it"))
except:
print(red("Could not establish connection with Node. Song wasnt deleted..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Search for a Song':
print('Insert the Song Title you wish to Search or * to get all songs of the Chord')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
}]
fetch_a = prompt(fetch_q, style=style)
if fetch_a['key'] == "*":
print(cyan("Fetching all the songs of the Chord..."))
try:
response = requests.get(baseURL + ends.c_query_star)
if response.status_code == 200:
nodes_list = json.loads(response.text)
# print(green(response.text))
# print(cyan()))
for node in nodes_list["res"]:
print(header("\n" + node["uid"]) + " " + underline(node["ip"] + ":" + node["port"]))
for song in node["song"]:
print(" -" + green(song["key"]) + " " + song["value"])
else:
print(yellow("Something went Wrong...") + response.status_code)
except:
print(red("Could not establish connection with Node. Couldnt search for song..."))
print(red("Unfortunately exiting..."))
exit(0)
else:
print(cyan("Searching Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']})
if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
print("Song found in node with id: ",green(response.text.split(" ")[0]))
print("Song value: " + green(response.text.split(" ")[1]))
else:
print(yellow("Song doesnt exist in the Chord"))
except:
print(red("Could not establish connection with Node. Couldnt search for song..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Network Overlay':
print(cyan("Initiating Network Overlay..."))
try:
response = requests.get(baseURL + ends.c_overlay)
if response.status_code == 200:
nodes_list = json.loads(response.text)
print('\n')
for node in nodes_list["res"]:
print(green(node["ip"] + ":" + node["port"]), end = '')
if node != nodes_list["res"][-1]:
print(" -> ", end = '')
print('\n')
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Help':
print('-------------------------------- Help --------------------------------\n')
overlayHelp=header("Overlay: ") + cyan("This functions recreates and prints the current Network Topology(eg. Node1 -> Node2 -> ...)\n")
insertHelp=header("Insert Song: ") + cyan("This functions expects a Song Title and a Song Value and inserts them in the Chord\n")
queryHelp=header("Search Song: ") + cyan("This function expects a Song Title and returns the Node in whitch the song is stored and the value of the song\n")
deleteHelp=header("Delete Song: ") + cyan("This function expects a Song Title and returns the Node who deleted the song\n")
departHelp=header("Depart: ") + cyan("This function makes the node connected to this cli leave the Chord\n")
autoTests=header("Run automated tests: ") + cyan("This function expects a test number (1=insert, 2=query, 3=requests), runs the test and returns the chord throughput")
print( " -",overlayHelp,"\n"
" -",insertHelp,"\n",
"-",queryHelp,"\n",
"-",deleteHelp,"\n",
"-",departHelp,"\n",
"-",autoTests,"\n",
)
continue
elif method_a == 'Run automated test':
print('Select which test you wish to run (1 = insert, 2 = query, 3 = requests)')
fetch_q = [
{
'type': 'input',
'name': 'test_n',
'message': 'Test:',
'filter': lambda val: str(val)
}
]
fetch_a = prompt(fetch_q, style=style)
test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's'
if test_number not in ('1', '2', '3'):
print(yellow("Wrong test number (give 1, 2 or 3)"))
continue
print(cyan("Running automated test: ") + ("insert" if test_number == '1' else ("query" if test_number == '2' else "requests")) + cyan("..."))
print(blue(test_trans(test_number)))
print(cyan("Done!"))
continue
elif method_a == 'Exit':
os.system('clear')
break
else:
os.system('clear')
continue
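# Usage sketch (assumed invocation, inferred from the argument handling below): the CLI
# expects the port of a running node to be passed with -p, e.g.
#   python cli_ui.py -p 5000
# The node IP is read from the network interface named in utils.config.NETIFACE.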
if __name__ == '__main__':
if len(sys.argv) < 3:
print("!! you must tell me the port. Ex. -p 5000 !!")
exit(0)
if sys.argv[1] in ("-p", "-P"):
my_port = sys.argv[2]
my_ip = os.popen('ip addr show ' + config.NETIFACE + ' | grep "\<inet\>" | awk \'{ print $2 }\' | awk -F "/" \'{ print $1 }\'').read().strip()
client(my_ip, my_port)
| [((11, 8, 17, 2), 'PyInquirer.style_from_dict', 'style_from_dict', ({(11, 24, 17, 1): "{Token.QuestionMark: '#E91E63 bold', Token.Selected: '#673AB7 bold', Token.\n Instruction: '#0bf416', Token.Answer: '#2196f3 bold', Token.Question:\n '#0bf416 bold'}"}, {}), "({Token.QuestionMark: '#E91E63 bold', Token.Selected:\n '#673AB7 bold', Token.Instruction: '#0bf416', Token.Answer:\n '#2196f3 bold', Token.Question: '#0bf416 bold'})", False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((20, 1, 20, 19), 'os.system', 'os.system', ({(20, 11, 20, 18): '"""clear"""'}, {}), "('clear')", False, 'import os\n'), ((40, 2, 40, 20), 'os.system', 'os.system', ({(40, 12, 40, 19): '"""clear"""'}, {}), "('clear')", False, 'import os\n'), ((39, 13, 39, 42), 'PyInquirer.prompt', 'prompt', (), '', False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((44, 15, 44, 52), 'requests.get', 'requests.get', ({(44, 28, 44, 51): 'baseURL + ends.c_depart'}, {}), '(baseURL + ends.c_depart)', False, 'import requests\n'), ((74, 13, 74, 41), 'PyInquirer.prompt', 'prompt', (), '', False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((77, 15, 77, 107), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((98, 13, 98, 41), 'PyInquirer.prompt', 'prompt', (), '', False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((101, 15, 101, 82), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((124, 13, 124, 41), 'PyInquirer.prompt', 'prompt', (), '', False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((235, 10, 235, 129), 'os.popen', 'os.popen', ({(235, 19, 235, 128): '\'ip addr show \' + config.NETIFACE + \' | grep "\\\\<inet\\\\>" | awk \\\'{ print $2 }\\\' | awk -F "/" \\\'{ print $1 }\\\'\''}, {}), '(\'ip addr show \' + config.NETIFACE +\n \' | grep "\\\\<inet\\\\>" | awk \\\'{ print $2 }\\\' | awk -F "/" \\\'{ print $1 }\\\'\'\n )', False, 'import os\n'), ((128, 16, 128, 57), 'requests.get', 'requests.get', ({(128, 29, 128, 56): 'baseURL + ends.c_query_star'}, {}), '(baseURL + ends.c_query_star)', False, 'import requests\n'), ((146, 16, 146, 82), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((162, 15, 162, 53), 'requests.get', 'requests.get', ({(162, 28, 162, 52): 'baseURL + ends.c_overlay'}, {}), '(baseURL + ends.c_overlay)', False, 'import requests\n'), ((130, 19, 130, 44), 'json.loads', 'json.loads', ({(130, 30, 130, 43): 'response.text'}, {}), '(response.text)', False, 'import json\n'), ((164, 18, 164, 43), 'json.loads', 'json.loads', ({(164, 29, 164, 42): 'response.text'}, {}), '(response.text)', False, 'import json\n'), ((211, 13, 211, 41), 'PyInquirer.prompt', 'prompt', (), '', False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((222, 3, 222, 21), 'os.system', 'os.system', ({(222, 13, 222, 20): '"""clear"""'}, {}), "('clear')", False, 'import os\n'), ((226, 3, 226, 21), 'os.system', 'os.system', ({(226, 13, 226, 20): '"""clear"""'}, {}), "('clear')", False, 'import os\n'), ((217, 14, 217, 37), 'auto.testing.test_trans', 'test_trans', ({(217, 25, 217, 36): 'test_number'}, {}), '(test_number)', False, 'from auto.testing import test_trans\n')] |
jdrese/SIWeightEditor | Contents/scripts/siweighteditor/weight.py | 0529c1a366b955f4373acd2e2f08f63b7909ff82 | # -*- coding: utf-8 -*-
from maya import mel
from maya import cmds
from . import lang
from . import common
import os
import json
import re
class WeightCopyPaste():
def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto',
threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False):
if viewmsg:
cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5)
        '''
        Function to save and load weight data.
        mode -> whether to copy or paste: 'copy' or 'paste'
        saveName -> name of the folder the weight data is saved to; specify it when you want to separate data per tool or model
        method -> how to paste: 'index', 'nearest', 'barycentric' or 'over'
        The 'index' method maps weights onto the object using vertex indices. It is the most convenient method when the target object and the exported data share the same topology.
        The 'nearest' method searches for the nearest vertex of the loaded data and sets the weight to that value. It is best when mapping a high-resolution mesh onto a low-resolution mesh.
        The 'barycentric' method is supported only for polygon meshes. It searches for the nearest triangle of the target geometry and
        rescales the weights according to the distance between the source point and the vertices. It is usually used for a coarse mesh that is mapped onto a high-resolution mesh.
        The 'over' method is similar to 'index', but the weights of the target mesh are not cleared before mapping, so weights at unmatched indices are kept as they are.
        nearest and barycentric are currently unusable because of a bug (the process never finishes), as of 2016/11/03
        -> barycentric and bilinear are available from Maya 2016 Extension 2
        weightFile -> specify a path when you want to point at a file manually instead of searching by mesh name; meant to be used together with the nearest / barycentric methods.
        -> Note that specifying a file name when copying in Maya prevents saving more than one file.
        threshold -> position search range for nearest / barycentric
        '''
self.skinMeshes = skinMeshes
self.saveName = saveName
self.method = method
self.weightFile = weightFile
self.threshold = threshold
self.engine = engine
self.memShapes = {}
self.target = tgt
self.pasteMode = {'index':1, 'nearest':3}
        # If the input is not a list, convert it to a list
if not isinstance(self.skinMeshes, list):
temp = self.skinMeshes
self.skinMeshes = []
self.skinMeshes.append(temp)
        # Build the file path in advance
if path == 'default':
self.filePath = os.getenv('MAYA_APP_DIR') + '\\Scripting_Files\\weight\\' + self.saveName
elif path == 'project':
self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1])
self.protect_path = os.path.join(self.scene_path, 'weight_protector')
try:
if not os.path.exists(self.protect_path):
os.makedirs(self.protect_path)
except Exception as e:
print e.message
return
            self.filePath = self.protect_path + '\\' + self.saveName
self.fileName = os.path.join(self.filePath, self.saveName + '.json')
self.apiName = os.path.join(self.filePath, self.saveName + '.skn')
        # Call copy or paste accordingly
if mode == 'copy':
self.weightCopy()
if mode == 'paste':
self.weightPaste()
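    # Minimal usage sketch (illustrative only; 'body_mesh' is a hypothetical mesh name):
    #   WeightCopyPaste().main(['body_mesh'], mode='copy', saveName='body_mesh')
    #   WeightCopyPaste().main(['body_mesh'], mode='paste', saveName='body_mesh', method='index')
    # By default the data is written under MAYA_APP_DIR/Scripting_Files/weight/<saveName>.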
def weightPaste(self):
dummy = cmds.spaceLocator()
for skinMesh in self.skinMeshes:
            # Decide which save file to read; 'auto' uses the mesh name
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
            # If there is no skinCluster, bind using the previously saved information
if not dstSkinCluster:
meshName = str(weightFile).replace('|', '__pipe__')
if os.path.exists(self.fileName):
try:
                        with open(self.fileName, 'r') as f:  # open the file ('r' = read mode, 'w' = write mode)
                            saveData = json.load(f)  # load the saved data
                            # self.visibility = saveData['visibility']  # read from the saved data
skinningMethod = saveData[';skinningMethod']
dropoffRate = saveData[';dropoffRate']
maintainMaxInfluences = saveData[';maintainMaxInfluences']
maxInfluences = saveData[';maxInfluences']
bindMethod = saveData[';bindMethod']
normalizeWeights = saveData[';normalizeWeights']
influences = saveData[';influences']
                            # If child nodes are transforms, temporarily parent them to a dummy to keep them safe
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut')
influences = cmds.ls(influences, l=True, tr=True)
                            # Bind
dstSkinCluster = cmds.skinCluster(
skinMesh,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
dstSkinCluster = dstSkinCluster[0]
                            # Restore the original parenting
                            common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent')
                            tempSkinNode = skinMesh  # keep the node that has the skinCluster so its parent can be retrieved later
except Exception as e:
print e.message
print 'Error !! Skin bind failed : ' + skinMesh
continue
else:
dstSkinCluster = dstSkinCluster[0]
                tempSkinNode = skinMesh  # keep the node that has the skinCluster so its parent can be retrieved later
if self.engine == 'maya':
files = os.listdir(self.filePath)
print files
if len(files) == 2:
for file in files:
name, ext = os.path.splitext(file)
if ext == '.xml':
xml_name = file
else:
                    # '|' (pipe) cannot be used in file names, so convert it
meshName = str(weightFile).replace('|', '__pipe__')
                    # ':' (colon) cannot be used in file names, so convert it
meshName = str(meshName).replace(':', '__colon__')
xml_name = meshName + '.xml'
if os.path.isfile(self.filePath + '\\' + xml_name):
if self.method == 'index' or self.method == 'over':
cmds.deformerWeights(xml_name,
im=True,
method=self.method,
deformer=dstSkinCluster,
path=self.filePath + '\\')
else:
cmds.deformerWeights(xml_name,
im=True,
deformer=dstSkinCluster,
method=self.method,
worldSpace=True,
positionTolerance=self.threshold,
path=self.filePath + '\\')
cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True)
print 'Weight paste to : ' + str(skinMesh)
else:
                    print 'Saved weight XML file does not exist : ' + skinMesh
        # Delete the dummy parent
cmds.delete(dummy)
cmds.select(self.skinMeshes, r=True)
    # Function that saves the weight information
def weightCopy(self):
saveData = {}
        # Create the save directory if it does not exist
if not os.path.exists(self.filePath):
            os.makedirs(os.path.dirname(self.filePath + '\\'))  # note: the trailing '\\' is required
        else:  # if the directory already exists, delete its contents
files = os.listdir(self.filePath)
if files is not None:
for file in files:
os.remove(self.filePath + '\\' + file)
skinFlag = False
all_influences = []
for skinMesh in self.skinMeshes:
try:
cmds.bakePartialHistory(skinMesh, ppt=True)
except:
pass
            # Get the skinCluster from the node's history (the node directly above inMesh is not always the skinCluster)
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
if not srcSkinCluster:
                continue  # move on to the next mesh if there is no skinCluster
            tempSkinNode = skinMesh  # keep the node that has the skinCluster so its parent can be retrieved later
            # Collect the various skinCluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
saveData[';skinningMethod'] = skinningMethod
saveData[';dropoffRate'] = dropoffRate
saveData[';maintainMaxInfluences'] = maintainMaxInfluences
saveData[';maxInfluences'] = maxInfluences
saveData[';bindMethod'] = bindMethod
saveData[';normalizeWeights'] = normalizeWeights
all_influences += influences
#saveData[';influences'] = influences
skinFlag = True
all_influences = list(set(all_influences))
saveData[';influences'] = all_influences
        # Add all influences beforehand so the copy can survive changes in the number of influences
for skinMesh in self.skinMeshes:
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
if not srcSkinCluster:
                continue  # move on to the next iteration of the for loop if there is no skinCluster
srcSkinCluster = srcSkinCluster[0]
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
sub_influences = list(set(all_influences) - set(influences))
if sub_influences:
cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0)
if self.engine == 'maya':
                # Decide which save file to read; 'auto' uses the mesh name
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
                # '|' (pipe) cannot be used in file names, so convert it
meshName = str(weightFile).replace('|', '__pipe__')
                # ':' (colon) cannot be used in file names, so convert it
meshName = str(meshName).replace(':', '__colon__')
cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\')
        with open(self.fileName, 'w') as f:  # open the file ('r' = read mode, 'w' = write mode)
json.dump(saveData, f)
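# Sketch of the JSON written by weightCopy above (values are only an illustration; the keys
# are prefixed with ';' and the per-vertex weights themselves live in the XML files exported
# through cmds.deformerWeights):
#   {";skinningMethod": 0, ";dropoffRate": 4.0, ";maintainMaxInfluences": true,
#    ";maxInfluences": 4, ";bindMethod": 0, ";normalizeWeights": 1,
#    ";influences": ["joint1", "joint2"]}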
def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True):
    '''
    Transfer skin weights between meshes.
    If the destination object is not bound yet, it is bound automatically using the bind information of the source.
    Arguments:
    skinMesh -> source mesh (one mesh; a list is also accepted)
    transferedMesh -> destination meshes (list allowed, multiple allowed; a single object also works)
    transferWeight -> whether to transfer the weights. Optional, default is True
    logTransfer -> whether to print log messages
    returnInfluences -> whether to return the bound influence information. Optional, default is False
    '''
massege01 = lang.Lang(
en=': It does not perform the transfer of weight because it is not a skin mesh.',
ja=u': スキンメッシュではないのでウェイトの転送を行いません'
).output()
massege02 = lang.Lang(
en='Transfer the weight:',
ja=u'ウェイトを転送:'
).output()
massege03 = lang.Lang(
en='Transfer bind influences:',
ja=u'バインド状態を転送:'
).output()
    if isinstance(skinMesh, list):  # if the source is a list, take only the first mesh
        skinMesh = skinMesh[0]  # safety for when a list is passed
    # Get the skinCluster from the node's history (the node directly above inMesh is not always the skinCluster)
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
# srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False)
if not srcSkinCluster:
if logTransfer:
print skinMesh + massege01
        return False  # exit the function if there is no skinCluster
    # Collect the various skinCluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
    influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)  # the q flag is query mode; e would be edit mode
    # If the input is not a list, convert it to a list
if not isinstance(transferedMesh, list):
temp = transferedMesh
transferedMesh = []
transferedMesh.append(temp)
for dst in transferedMesh:
        # Prepare a dummy parent to temporarily hold child nodes
dummy = common.TemporaryReparent().main(mode='create')
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut')
shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh')
        if not shapes:  # if there is no mesh
            continue  # stop here and move on to the next object
        # Check whether a skinCluster exists
dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster')
        # If there is no skinCluster, bind using the information collected from the source
if not dstSkinCluster:
            # Bind
dstSkinCluster = cmds.skinCluster(
dst,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
if logTransfer:
print massege03 + '[' + skinMesh + '] >>> [' + dst + ']'
dstSkinCluster = dstSkinCluster[0]
if transferWeight:
cmds.copySkinWeights(
ss=srcSkinCluster,
ds=dstSkinCluster,
surfaceAssociation='closestPoint',
influenceAssociation=['name', 'closestJoint', 'oneToOne'],
normalize=True,
noMirror=True
)
if logTransfer:
print massege02 + '[' + skinMesh + '] >>> [' + dst + ']'
        # Restore the original parenting
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent')
        # Delete the dummy parent
common.TemporaryReparent().main(dummyParent=dummy, mode='delete')
if returnInfluences:
return influences
else:
return True
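# Example calls (illustrative object names):
#   transfer_weight('body_hi', ['body_low_A', 'body_low_B'])  # bind if needed, then copy weights
#   infs = transfer_weight('body_hi', 'body_low_A', returnInfluences=True)  # also return the influence list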
def symmetry_weight(srcNode=None, dstNode=None, symWeight=True):
    '''
    Mirror (symmetrize) skin weights.
    srcNode -> source of the mirroring
    dstNode -> destination of the mirroring
    symWeight -> whether to mirror the weights
    '''
    # Get the skinCluster
if srcNode is None:
return
srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh')
if srcShapes:
srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster')
        # If a skinCluster exists, set joint labels and then mirror the weights
if srcSkinCluster:
            # Call the function that transfers the bind state
            skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True)  # get the joints
for skinJoint in skinJointAll:
                # Call the joint label setup function
joint_label(skinJoint, visibility=False)
if symWeight is False or dstNode is None:
return
transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True)
dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh')
dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False)
cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0],
mirrorMode='YZ', surfaceAssociation='closestComponent',
influenceAssociation='label', normalize=True)
def load_joint_label_rules():
    # Default values used when the rule files cannot be loaded
start_l_list = ['L_', 'l_', 'Left_', 'left_']
start_r_list = ['R_', 'r_', 'Right_', 'right_']
mid_l_list = ['_L_', '_l_', '_Left_', '_left_']
mid_r_list = ['_R_', '_r_', '_Right_', '_right_']
end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left']
end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right']
def_left_list_list = [start_l_list, mid_l_list, end_l_list]
def_right_list_list = [start_r_list, mid_r_list, end_r_list]
    # Load the rules from the left/right symmetry configuration files
dir_path = os.path.join(
os.getenv('MAYA_APP_dir'),
'Scripting_Files')
start_file = dir_path+'/joint_rule_start.json'
middle_file = dir_path+'/joint_rule_middle.json'
end_file = dir_path+'/joint_rule_end.json'
save_files = [start_file, middle_file, end_file]
left_list_list = []
right_list_list = []
for i, save_file in enumerate(save_files):
        if os.path.exists(save_file):  # if the save file exists
try:
with open(save_file, 'r') as f:
save_data = json.load(f)
l_list = save_data.keys()
r_list = save_data.values()
left_list_list.append(l_list)
right_list_list.append(r_list)
except Exception as e:
print e.message
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
else:
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
return left_list_list, right_list_list
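# The joint_rule_*.json files read above are flat left-to-right mappings; hypothetical content
# mirroring the defaults for joint_rule_start.json would be:
#   {"L_": "R_", "l_": "r_", "Left_": "Right_", "left_": "right_"}
# Keys are used as the left-side tokens and values as the right-side tokens.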
def joint_label(object, visibility=False):
    '''
    Set joint labels.
    object -> object(s); a list is also accepted
    visibility -> visibility of the label. Optional, default is False.
    '''
    # Load the labeling rules first
left_list_list, right_list_list = load_joint_label_rules()
    # If the input is not a list, convert it to a list
if not isinstance(object, list):
temp = object
object = []
object.append(temp)
for skinJoint in object:
objTypeName = cmds.objectType(skinJoint)
if objTypeName == 'joint':
split_name = skinJoint.split('|')[-1]
            # Determine whether the skeleton name contains a left/right marker
side = 0
side_name = ''
for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)):
for j, lr_list in enumerate([l_list, r_list]):
for k, lr in enumerate(lr_list):
if i == 0:
if re.match(lr, split_name):
side = j + 1
if i == 1:
if re.search(lr, split_name):
side = j + 1
if i == 2:
if re.match(lr[::-1], split_name[::-1]):
side = j + 1
                        if side:  # break out of all the loops once a match is found
side_name = lr
break
if side:
break
if side:
break
#print 'joint setting :', split_name, side, side_name
            # Set the side label; use center when the name is neither left nor right
cmds.setAttr(skinJoint + '.side', side)
            # Set the label type to "Other"
cmds.setAttr(skinJoint + '.type', 18)
new_joint_name = split_name.replace(side_name.replace('.', ''), '')
            # Set the skeleton name
cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string')
            # Set the label visibility
cmds.setAttr(skinJoint + '.drawLabel', visibility)
else:
print(str(skinJoint) + ' : ' + str(objTypeName) + ' Skip Command')
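# Matching sketch for the rules above: prefix rules use re.match(rule, name), middle rules use
# re.search(rule, name), and suffix rules re.match the reversed strings, so e.g. a joint named
# 'arm_L' matches the '_L' suffix rule and gets .side = 1 (Left).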
# Toggle muting of the skin clusters
def toggle_mute_skinning():
msg01 = lang.Lang(
en='No mesh selection.\nWould you like to process all of mesh in this scene?.',
ja=u'選択メッシュがありません。\nシーン内のすべてのメッシュを処理しますか?').output()
msg02 = lang.Lang(en='Yes', ja=u'はい').output()
msg03 = lang.Lang(en='No', ja=u'いいえ').output()
msg04 = lang.Lang(
en='Skinning is disabled',
ja=u'スキニングは無効になりました') .output()
msg05 = lang.Lang(
en='Skinning is enabled',
ja=u'スキニングが有効になりました') .output()
cmds.selectMode(o=True)
objects = cmds.ls(sl=True, l=True)
ad_node = []
for node in objects:
children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform')
ad_node += [node]+children
#print len(ad_node)
objects = set(ad_node)
#print len(objects)
if not objects:
all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03)
if all_mesh == msg02:
objects = cmds.ls(type='transform')
if not objects:
return
mute_flag = 1
skin_list = []
for node in objects:
skin = cmds.ls(cmds.listHistory(node), type='skinCluster')
if not skin:
continue
skin_list.append(skin)
if cmds.getAttr(skin[0]+'.envelope') > 0:
mute_flag = 0
for skin in skin_list:
cmds.setAttr(skin[0]+'.envelope', mute_flag)
if mute_flag == 0:
cmds.confirmDialog(m=msg04)
if mute_flag == 1:
cmds.confirmDialog(m=msg05) | [] |
Blulab-Utah/pyConTextPipeline | pyConTextNLP/__init__.py | d4060f89d54f4db56914832033f8ce589ee3c181 | #Copyright 2010 Brian E. Chapman
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""This is an alternative implementation of the pyConText package where I make
use of graphs to indicate relationships between targets and modifiers. Nodes of
the graphs are the targets and modifiers identified in the text; edges of the
graphs are relationships between the targets. This provides for much simpler
code than what exists in the other version of pyConText where each object has a
dictionary of __modifies and __modifiedby that must be kept in sync with each
other.
Also it is hoped that the use of a directional graph could ultimately simplify
our itemData structures as we could chain together items"""
import os
version = {}
with open(os.path.join(os.path.dirname(__file__),"version.py")) as f0:
exec(f0.read(), version)
__version__ = version['__version__']
| [((27, 23, 27, 48), 'os.path.dirname', 'os.path.dirname', ({(27, 39, 27, 47): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
rcooke-ast/PYPIT | pypeit/metadata.py | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | """
Provides a class that handles the fits metadata required by PypeIt.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import os
import io
import string
from copy import deepcopy
import datetime
from IPython import embed
import numpy as np
import yaml
from astropy import table, coordinates, time, units
from pypeit import msgs
from pypeit import utils
from pypeit.core import framematch
from pypeit.core import flux_calib
from pypeit.core import parse
from pypeit.core import meta
from pypeit.io import dict_to_lines
from pypeit.par import PypeItPar
from pypeit.par.util import make_pypeit_file
from pypeit.bitmask import BitMask
# TODO: Turn this into a DataContainer
# Initially tried to subclass this from astropy.table.Table, but that
# proved too difficult.
class PypeItMetaData:
"""
Provides a table and interface to the relevant fits file metadata
used during the reduction.
The content of the fits table is dictated by the header keywords
specified for the provided spectrograph. It is expected that this
table can be used to set the frame type of each file.
The metadata is validated using checks specified by the provided
spectrograph class.
For the data table, one should typically provide either the file
list from which to grab the data from the fits headers or the
data directly. If neither are provided the table is instantiated
without any data.
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The spectrograph used to collect the data save to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:obj:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior.
files (:obj:`str`, :obj:`list`, optional):
The list of files to include in the table.
data (table-like, optional):
The data to include in the table. The type can be anything
allowed by the instantiation of
:class:`astropy.table.Table`.
usrdata (:obj:`astropy.table.Table`, optional):
A user provided set of data used to supplement or overwrite
metadata read from the file headers. The table must have a
`filename` column that is used to match to the metadata
table generated within PypeIt. **Note**: This is ignored if
`data` is also provided. This functionality is only used
when building the metadata from the fits files.
strict (:obj:`bool`, optional):
Function will fault if there is a problem with the reading
the header for any of the provided files; see
:func:`pypeit.spectrographs.spectrograph.get_headarr`. Set
to False to instead report a warning and continue.
Attributes:
spectrograph
(:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The spectrograph used to collect the data save to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:class:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior. If not
provided, the default parameters specific to the provided
spectrograph are used.
configs (:obj:`dict`):
A dictionary of the unique configurations identified.
type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`):
The bitmask used to set the frame type of each fits file.
calib_bitmask (:class:`BitMask`):
The bitmask used to keep track of the calibration group bits.
table (:class:`astropy.table.Table`):
The table with the relevant metadata for each fits file to
use in the data reduction.
"""
def __init__(self, spectrograph, par, files=None, data=None, usrdata=None,
strict=True):
if data is None and files is None:
# Warn that table will be empty
msgs.warn('Both data and files are None in the instantiation of PypeItMetaData.'
' The table will be empty!')
# Initialize internals
self.spectrograph = spectrograph
self.par = par
if not isinstance(self.par, PypeItPar):
raise TypeError('Input parameter set must be of type PypeItPar.')
self.type_bitmask = framematch.FrameTypeBitMask()
# Build table
self.table = table.Table(data if files is None
else self._build(files, strict=strict,
usrdata=usrdata))
# Merge with user data, if present
if usrdata is not None:
self.merge(usrdata)
# Impose types on specific columns
self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str])
# Initialize internal attributes
self.configs = None
self.calib_bitmask = None
# Initialize columns that the user might add
self.set_user_added_columns()
# Validate instrument name
self.spectrograph.vet_instrument(self.table)
def _impose_types(self, columns, types):
"""
Impose a set of types on certain columns.
.. note::
:attr:`table` is edited in place.
Args:
columns (:obj:`list`):
List of column names
types (:obj:`list`):
List of types
"""
for c,t in zip(columns, types):
if c in self.keys():
self.table[c] = self.table[c].astype(t)
def _build(self, files, strict=True, usrdata=None):
"""
Generate the fitstbl that will be at the heart of PypeItMetaData.
Args:
files (:obj:`str`, :obj:`list`):
One or more files to use to build the table.
strict (:obj:`bool`, optional):
Function will fault if :func:`fits.getheader` fails to
read any of the headers. Set to False to report a
warning and continue.
usrdata (astropy.table.Table, optional):
Parsed for frametype for a few instruments (e.g. VLT)
where meta data may not be required
Returns:
dict: Dictionary with the data to assign to :attr:`table`.
"""
# Allow for single files
_files = files if hasattr(files, '__len__') else [files]
# Build lists to fill
data = {k:[] for k in self.spectrograph.meta.keys()}
data['directory'] = ['None']*len(_files)
data['filename'] = ['None']*len(_files)
# Build the table
for idx, ifile in enumerate(_files):
# User data (for frame type)
if usrdata is None:
usr_row = None
else:
# TODO: This check should be done elsewhere
# Check
if os.path.basename(ifile) != usrdata['filename'][idx]:
msgs.error('File name list does not match user-provided metadata table. See '
'usrdata argument of instantiation of PypeItMetaData.')
usr_row = usrdata[idx]
# Add the directory and file name to the table
data['directory'][idx], data['filename'][idx] = os.path.split(ifile)
if not data['directory'][idx]:
data['directory'][idx] = '.'
# Read the fits headers
headarr = self.spectrograph.get_headarr(ifile, strict=strict)
# Grab Meta
for meta_key in self.spectrograph.meta.keys():
value = self.spectrograph.get_meta_value(headarr, meta_key,
required=strict,
usr_row=usr_row,
ignore_bad_header = self.par['rdx']['ignore_bad_headers'])
if isinstance(value, str) and '#' in value:
value = value.replace('#', '')
msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(
meta_key, value))
data[meta_key].append(value)
msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1]))
# JFH Changed the below to not crash if some files have None in
# their MJD. This is the desired behavior since if there are
# empty or corrupt files we still want this to run.
# Validate, print out a warning if there is problem
try:
time.Time(data['mjd'], format='mjd')
except ValueError:
mjd = np.asarray(data['mjd'])
filenames = np.asarray(data['filename'])
bad_files = filenames[mjd == None]
# Print status message
msg = 'Time invalid for {0} files.\n'.format(len(bad_files))
msg += 'Continuing, but the following frames may be empty or have corrupt headers:\n'
for file in bad_files:
msg += ' {0}\n'.format(file)
msgs.warn(msg)
# Return
return data
# TODO: In this implementation, slicing the PypeItMetaData object
# will return an astropy.table.Table, not a PypeItMetaData object.
def __getitem__(self, item):
return self.table.__getitem__(item)
def __setitem__(self, item, value):
return self.table.__setitem__(item, value)
def __len__(self):
return self.table.__len__()
def __repr__(self):
return self.table._base_repr_(html=False,
descr_vals=['PypeItMetaData:\n',
' spectrograph={0}\n'.format(
self.spectrograph.name),
' length={0}\n'.format(len(self))])
def _repr_html_(self):
return self.table._base_repr_(html=True, max_width=-1,
descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\n'.format(
self.spectrograph.name, len(self))])
@staticmethod
def default_keys():
return [ 'directory', 'filename', 'instrume' ]
def keys(self):
return self.table.keys()
def sort(self, col):
return self.table.sort(col)
def merge(self, usrdata, match_type=True):
"""
Use the provided table to supplement or overwrite the metadata.
If the internal table already contains the column in `usrdata`,
the function will try to match the data type of the `usrdata`
column to the existing data type. If it can't it will just add
the column anyway, with the type in `usrdata`. You can avoid
this step by setting `match_type=False`.
Args:
usrdata (:obj:`astropy.table.Table`):
A user provided set of data used to supplement or
overwrite metadata read from the file headers. The
table must have a `filename` column that is used to
match to the metadata table generated within PypeIt.
match_type (:obj:`bool`, optional):
Attempt to match the data type in `usrdata` to the type
in the internal table. See above.
Raises:
TypeError:
Raised if `usrdata` is not an `astropy.io.table.Table`
KeyError:
Raised if `filename` is not a key in the provided table.
"""
meta_data_model = meta.get_meta_data_model()
# Check the input
if not isinstance(usrdata, table.Table):
raise TypeError('Must provide an astropy.io.table.Table instance.')
if 'filename' not in usrdata.keys():
raise KeyError('The user-provided table must have \'filename\' column!')
# Make sure the data are correctly ordered
srt = [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']]
# Convert types if possible
existing_keys = list(set(self.table.keys()) & set(usrdata.keys()))
radec_done = False
if len(existing_keys) > 0 and match_type:
for key in existing_keys:
if len(self.table[key].shape) > 1: # NOT ALLOWED!!
# TODO: This should be converted to an assert statement...
raise ValueError('CODING ERROR: Found high-dimensional column.')
#embed(header='372 of metadata')
elif key in meta_data_model.keys(): # Is this meta data??
dtype = meta_data_model[key]['dtype']
else:
dtype = self.table[key].dtype
# Deal with None's properly
nones = usrdata[key] == 'None'
usrdata[key][nones] = None
# Rest
                # Allow for str RA, DEC (backwards compatibility)
if key in ['ra', 'dec'] and not radec_done:
ras, decs = meta.convert_radec(usrdata['ra'][~nones].data,
usrdata['dec'][~nones].data)
usrdata['ra'][~nones] = ras.astype(dtype)
usrdata['dec'][~nones] = decs.astype(dtype)
radec_done = True
else:
usrdata[key][~nones] = usrdata[key][~nones].astype(dtype)
# Include the user data in the table
for key in usrdata.keys():
self.table[key] = usrdata[key][srt]
def finalize_usr_build(self, frametype, setup):
"""
Finalize the build of the table based on user-provided data,
typically pulled from the PypeIt file.
This function:
- sets the frame types based on the provided object
- sets all the configurations to the provided `setup`
- assigns all frames to a single calibration group, if the
'calib' column does not exist
- if the 'comb_id' column does not exist, this sets the
combination groups to be either undefined or to be unique
for each science or standard frame, see
:func:`set_combination_groups`.
.. note::
This should only be run if all files are from a single
instrument configuration. :attr:`table` is modified
in-place.
See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`.
.. todo::
- Why isn't frametype just in the user-provided data? It
may be (see get_frame_types) and I'm just not using it...
Args:
frametype (:obj:`dict`):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
setup (:obj:`str`):
If the 'setup' columns does not exist, fill the
configuration setup columns with this single identifier.
"""
self.get_frame_types(user=frametype)
# TODO: Add in a call to clean_configurations? I didn't add it
# here, because this method is only called for a preconstructed
# pypeit file, which should nominally follow an execution of
# pypeit_setup. If the user edits back in a frame that has an
# invalid key, at least for now the DEIMOS image reader will
# fault.
self.set_configurations(fill=setup)
self.set_calibration_groups(default=True)
self.set_combination_groups()
def get_configuration(self, indx, cfg_keys=None):
"""
Return the configuration dictionary for a given frame.
This is not the same as the backwards compatible "setup"
dictionary.
Args:
indx (:obj:`int`):
The index of the table row to use to construct the
configuration.
cfg_keys (:obj:`list`, optional):
The list of metadata keys to use to construct the
configuration. If None, the `configuration_keys` of
:attr:`spectrograph` is used.
Returns:
dict: A dictionary with the metadata values from the
selected row.
"""
_cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys
return {k:self.table[k][indx] for k in _cfg_keys}
def master_key(self, row, det=1):
"""
Construct the master key for the file in the provided row.
The master key is the combination of the configuration, the
calibration group, and the detector. The configuration ID is
the same as included in the configuration column (A, B, C, etc),
the calibration group is the same as the calibration bit number,
and the detector number is provided as an argument and converted
to a zero-filled string with two digits (the maximum number of
detectors is 99).
Using the calibration bit in the keyword allows MasterFrames to
be used with multiple calibration groups.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the key.
det (:obj:`int`, :obj:`tuple`, optional):
The 1-indexed detector number(s). If a tuple, it must include
detectors designated as a viable mosaic for
:attr:`spectrograph`; see
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`.
Returns:
:obj:`str`: Master key with configuration, calibration group(s), and
detector.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns
haven't been defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot provide master key string without setup and calibbit; '
'run set_configurations and set_calibration_groups.')
det_name = self.spectrograph.get_det_name(det)
return f"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}"
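    # Illustrative result: setup 'A', calibration bit 0, and detector 1 would give a master
    # key like 'A_0_DET01', with the detector name taken from Spectrograph.get_det_name().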
def construct_obstime(self, row):
"""
Construct the MJD of when the frame was observed.
.. todo::
- Consolidate with :func:`convert_time` ?
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
Returns:
astropy.time.Time: The MJD of the observation.
"""
return time.Time(self['mjd'][row], format='mjd')
def construct_basename(self, row, obstime=None):
"""
Construct the root name primarily for PypeIt file output.
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
obstime (:class:`astropy.time.Time`, optional):
The MJD of the observation. If None, constructed using
:func:`construct_obstime`.
Returns:
str: The root name for file output.
"""
_obstime = self.construct_obstime(row) if obstime is None else obstime
tiso = time.Time(_obstime, format='isot')
dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')
return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],
self['target'][row].replace(" ", ""),
self.spectrograph.camera,
datetime.datetime.strftime(dtime, '%Y%m%dT'),
tiso.value.split("T")[1].replace(':',''))
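    # The resulting pattern is '<filename>-<target>_<camera>_<YYYYMMDD>T<hhmmss.sss>',
    # e.g. (hypothetical) 'b27-J1217p3905_KASTb_20150520T045733.260'.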
def get_setup(self, row, det=None, config_only=False):
"""
Construct the setup dictionary.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting it. And it may be something to put
in the relevant spectrograph class.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the setup.
det (:obj:`int`, optional):
The 1-indexed detector to include. If None, all
detectors are included.
config_only (:obj:`bool`, optional):
Just return the dictionary with the configuration, don't
include the top-level designation of the configuration
itself.
Returns:
dict: The pypeit setup dictionary with the default format.
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot provide instrument setup without \'setup\' column; '
'run set_configurations.')
dispname = 'none' if 'dispname' not in self.keys() else self['dispname'][row]
dispangle = 'none' if 'dispangle' not in self.keys() else self['dispangle'][row]
dichroic = 'none' if 'dichroic' not in self.keys() else self['dichroic'][row]
decker = 'none' if 'decker' not in self.keys() else self['decker'][row]
slitwid = 'none' if 'slitwid' not in self.keys() else self['slitwid'][row]
slitlen = 'none' if 'slitlen' not in self.keys() else self['slitlen'][row]
binning = '1,1' if 'binning' not in self.keys() else self['binning'][row]
skey = 'Setup {}'.format(self['setup'][row])
# Key names *must* match configuration_keys() for spectrographs
setup = {skey:
{'--':
{'disperser': {'dispname': dispname, 'dispangle':dispangle},
'dichroic': dichroic,
'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen},
'binning': binning, # PypeIt orientation binning of a science image
}
}
}
#_det = np.arange(self.spectrograph.ndet)+1 if det is None else [det]
#for d in _det:
# setup[skey][str(d).zfill(2)] \
# = {'binning': binning, 'det': d,
# 'namp': self.spectrograph.detector[d-1]['numamplifiers']}
return setup[skey] if config_only else setup
def get_configuration_names(self, ignore=None, return_index=False, configs=None):
"""
Get the list of the unique configuration names.
This provides just the list of setup identifiers ('A', 'B',
etc.) and the row index where it first occurs. This is
different from :func:`unique_configurations` because the latter
determines and provides the configurations themselves.
This is mostly a convenience function for the writing routines.
Args:
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
return_index (:obj:`bool`, optional):
                Return row indices with the first occurrence of these
configurations.
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']).
Returns:
numpy.array: The list of unique setup names. A second
returned object provides the indices of the first occurrence
of these setups, if requested.
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot get setup names; run set_configurations.')
# Unique configurations
setups, indx = np.unique(self['setup'], return_index=True)
if ignore is not None:
# Remove the selected configurations to ignore
rm = np.logical_not(np.isin(setups, ignore))
setups = setups[rm]
indx = indx[rm]
# Restrict
_configs = None if configs is None else np.atleast_1d(configs)
# TODO: Why do we need to specify 'all' here? Can't `configs is
# None` mean that you want all the configurations? Or can we
# make the default 'all'?
if configs is not None and 'all' not in _configs:
use = np.isin(setups, _configs)
setups = setups[use]
indx = indx[use]
return setups, indx if return_index else setups
def _get_cfgs(self, copy=False, rm_none=False):
"""
Convenience method to return :attr:`configs` with possible
alterations.
This method *should not* be called by any method outside of
this class; use :func:`unique_configurations` instead.
Args:
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
"""
_cfg = deepcopy(self.configs) if copy else self.configs
if rm_none and 'None' in _cfg.keys():
del _cfg['None']
return _cfg
def unique_configurations(self, force=False, copy=False, rm_none=False):
"""
Return the unique instrument configurations.
If run before the ``'setup'`` column is initialized, this function
determines the unique instrument configurations by finding
unique combinations of the items in the metadata table listed by
the spectrograph ``configuration_keys`` method.
If run after the ``'setup'`` column has been set, this simply
constructs the configuration dictionary using the unique
configurations in that column.
This is used to set the internal :attr:`configs`. If this
attribute is not None, this function simply returns
:attr:`config` (cf. ``force``).
.. warning::
Any frame types returned by the
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
method for :attr:`spectrograph` will be ignored in the
construction of the unique configurations. If
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
does not return None and the frame types have not yet
been defined (see :func:`get_frame_types`), this method
will fault!
Args:
force (:obj:`bool`, optional):
Force the configurations to be redetermined. Otherwise
the configurations are only determined if
:attr:`configs` has not yet been defined.
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
Raises:
PypeItError:
Raised if there are list of frame types to ignore but
the frame types have not been defined yet.
"""
if self.configs is not None and not force:
return self._get_cfgs(copy=copy, rm_none=rm_none)
if 'setup' in self.keys():
msgs.info('Setup column already set. Finding unique configurations.')
uniq, indx = np.unique(self['setup'], return_index=True)
ignore = uniq == 'None'
if np.sum(ignore) > 0:
msgs.warn('Ignoring {0} frames with configuration set to None.'.format(
np.sum(ignore)))
self.configs = {}
for i in range(len(uniq)):
if ignore[i]:
continue
self.configs[uniq[i]] = self.get_configuration(indx[i])
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
msgs.info('Using metadata to determine unique configurations.')
# If the frame types have been set, ignore anything listed in
# the ignore_frames
indx = np.arange(len(self))
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To ignore frames, types must have been defined; run get_frame_types.')
ignore_frames = list(ignore_frames.keys())
msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames))
use = np.ones(len(self), dtype=bool)
for ftype in ignore_frames:
use &= np.logical_not(self.find_frames(ftype))
indx = indx[use]
if len(indx) == 0:
msgs.error('No frames to use to define configurations!')
# Get the list of keys to use
cfg_keys = self.spectrograph.configuration_keys()
# Configuration identifiers are iterations through the
# upper-case letters: A, B, C, etc.
double_alphabet = [str_i + str_j for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase]
cfg_iter = list(string.ascii_uppercase) + double_alphabet
cfg_indx = 0
# TODO: Placeholder: Allow an empty set of configuration keys
# meaning that the instrument setup has only one configuration.
if len(cfg_keys) == 0:
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = {}
msgs.info('All files assumed to be from a single configuration.')
return self._get_cfgs(copy=copy, rm_none=rm_none)
# Use the first file to set the first unique configuration
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys)
cfg_indx += 1
# Check if any of the other files show a different
# configuration.
for i in indx[1:]:
j = 0
for c in self.configs.values():
if row_match_config(self.table[i], c, self.spectrograph):
break
j += 1
unique = j == len(self.configs)
if unique:
if cfg_indx == len(cfg_iter):
msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter)))
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys)
cfg_indx += 1
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
def set_configurations(self, configs=None, force=False, fill=None):
"""
Assign each frame to a configuration (setup) and include it
in the metadata table.
The internal table is edited *in place*. If the 'setup'
column already exists, the configurations are **not** reset
unless you call the function with ``force=True``.
Args:
configs (:obj:`dict`, optional):
A nested dictionary, one dictionary per configuration
with the associated values of the metadata associated
with each configuration. The metadata keywords in the
dictionary should be the same as in the table, and the
keywords used to set the configuration should be the
same as returned by the spectrograph
`configuration_keys` method. The latter is not checked.
If None, this is set by :func:`unique_configurations`.
force (:obj:`bool`, optional):
Force the configurations to be reset.
fill (:obj:`str`, optional):
If the 'setup' column does not exist, fill the
configuration setup columns with this single identifier.
Ignores other inputs.
Raises:
PypeItError:
Raised if none of the keywords in the provided
configuration match with the metadata keywords. Also
raised when some frames cannot be assigned to a
configuration, the spectrograph defined frames that
have been ignored in the determination of the unique
configurations, but the frame types have not been set
yet.
"""
# Configurations have already been set
if 'setup' in self.keys() and not force:
return
if 'setup' not in self.keys() and fill is not None:
self['setup'] = fill
return
_configs = self.unique_configurations() if configs is None else configs
for k, cfg in _configs.items():
if len(set(cfg.keys()) - set(self.keys())) > 0:
msgs.error('Configuration {0} defined using unavailable keywords!'.format(k))
self.table['setup'] = 'None'
nrows = len(self)
for i in range(nrows):
for d, cfg in _configs.items():
if row_match_config(self.table[i], cfg, self.spectrograph):
self.table['setup'][i] = d
# Check if any of the configurations are not set
not_setup = self.table['setup'] == 'None'
if not np.any(not_setup):
# All are set, so we're done
return
# Some frame types may have been ignored
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is None:
# Nope, we're still done
return
# At this point, we need the frame type to continue
if 'frametype' not in self.keys():
msgs.error('To account for ignored frames, types must have been defined; run '
'get_frame_types.')
# For each configuration, determine if any of the frames with
# the ignored frame types should be assigned to it:
for cfg_key in _configs.keys():
in_cfg = self.table['setup'] == cfg_key
for ftype, metakey in ignore_frames.items():
# TODO: For now, use this assert to check that the
# metakey is either not set or a string
assert metakey is None or isinstance(metakey, str), \
                    'CODING ERROR: metadata keywords set by config_independent_frames are not ' \
'correctly defined for {0}; values must be None or a string.'.format(
self.spectrograph.__class__.__name__)
# Get the list of frames of this type without a
# configuration
indx = (self.table['setup'] == 'None') & self.find_frames(ftype)
if not np.any(indx):
continue
if metakey is None:
# No matching meta data defined, so just set all
# the frames to this (first) configuration
self.table['setup'][indx] = cfg_key
continue
# Find the unique values of meta for this configuration
uniq_meta = np.unique(self.table[metakey][in_cfg].data)
# Warn the user that the matching meta values are not
# unique for this configuration.
if uniq_meta.size != 1:
msgs.warn('When setting the instrument configuration for {0} '.format(ftype)
+ 'frames, configuration {0} does not have unique '.format(cfg_key)
+ '{0} values.' .format(meta))
# Find the frames of this type that match any of the
# meta data values
indx &= np.isin(self.table[metakey], uniq_meta)
self.table['setup'][indx] = cfg_key
def clean_configurations(self):
"""
Ensure that configuration-defining keywords all have values
that will yield good PypeIt reductions. Any frames that do
not are removed from :attr:`table`, meaning this method may
modify that attribute directly.
The valid values for configuration keys is set by
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`.
"""
cfg_limits = self.spectrograph.valid_configuration_values()
if cfg_limits is None:
# No values specified, so we're done
return
good = np.ones(len(self), dtype=bool)
for key in cfg_limits.keys():
# NOTE: For now, check that the configuration values were
# correctly assigned in the spectrograph class definition.
# This should probably go somewhere else or just removed.
assert isinstance(cfg_limits[key], list), \
'CODING ERROR: valid_configuration_values is not correctly defined ' \
'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__)
# Check that the metadata are valid for this column.
indx = np.isin(self[key], cfg_limits[key])
if not np.all(indx):
msgs.warn('Found frames with invalid {0}.'.format(key))
good &= indx
if np.all(good):
# All values good, so we're done
return
# Alert the user that some of the frames are going to be
# removed
msg = 'The following frames have configurations that cannot be reduced by PypeIt' \
' and will be removed from the metadata table (pypeit file):\n'
indx = np.where(np.logical_not(good))[0]
for i in indx:
msg += ' {0}\n'.format(self['filename'][i])
msgs.warn(msg)
# And remove 'em
self.table = self.table[good]
def _set_calib_group_bits(self):
"""
Set the calibration group bit based on the string values of the
'calib' column.
"""
# Find the number groups by searching for the maximum number
# provided, regardless of whether or not a science frame is
# assigned to that group.
ngroups = 0
for i in range(len(self)):
if self['calib'][i] in ['all', 'None']:
# No information, keep going
continue
# Convert to a list of numbers
l = np.amax([ 0 if len(n) == 0 else int(n)
for n in self['calib'][i].replace(':',',').split(',')])
# Check against current maximum
ngroups = max(l+1, ngroups)
# Define the bitmask and initialize the bits
self.calib_bitmask = BitMask(np.arange(ngroups))
self['calibbit'] = 0
# Set the calibration bits
for i in range(len(self)):
# Convert the string to the group list
grp = parse.str2list(self['calib'][i], ngroups)
if grp is None:
# No group selected
continue
# Assign the group; ensure the integers are unique
self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp)
def _check_calib_groups(self):
"""
Check that the calibration groups are valid.
This currently only checks that the science frames are
associated with one calibration group.
TODO: Is this appropriate for NIR data?
"""
is_science = self.find_frames('science')
for i in range(len(self)):
if not is_science[i]:
continue
if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1:
msgs.error('Science frames can only be assigned to a single calibration group.')
@property
def n_calib_groups(self):
"""Return the number of calibration groups."""
return None if self.calib_bitmask is None else self.calib_bitmask.nbits
def set_calibration_groups(self, global_frames=None, default=False, force=False):
"""
Group calibration frames into sets.
Requires the 'setup' column to have been defined. For now this
is a simple grouping of frames with the same configuration.
.. todo::
- Maintain a detailed description of the logic.
The 'calib' column has a string type to make sure that it
matches with what can be read from the pypeit file. The
'calibbit' column is actually what is used to determine the
calibration group of each frame; see :attr:`calib_bitmask`.
Args:
global_frames (:obj:`list`, optional):
A list of strings with the frame types to use in all
calibration groups (e.g., ['bias', 'dark']).
default (:obj:`bool`, optional):
If the 'calib' column is not present, set a single
calibration group *for all rows*.
force (:obj:`bool`, optional):
Force the calibration groups to be reconstructed if
the 'calib' column already exists.
Raises:
PypeItError:
Raised if 'setup' column is not defined, or if
`global_frames` is provided but the frame types have not
been defined yet.
"""
# Set the default if requested and 'calib' doesn't exist yet
if 'calib' not in self.keys() and default:
self['calib'] = '0'
# Make sure the calibbit column does not exist
if 'calibbit' in self.keys():
del self['calibbit']
# Groups have already been set
if 'calib' in self.keys() and 'calibbit' in self.keys() and not force:
return
# Groups have been set but the bits have not (likely because the
# data was read from a pypeit file)
if 'calib' in self.keys() and 'calibbit' not in self.keys() and not force:
self._set_calib_group_bits()
self._check_calib_groups()
return
# TODO: The rest of this just nominally sets the calibration
# group based on the configuration. This will change!
# The configuration must be present to determine the calibration
# group
if 'setup' not in self.keys():
msgs.error('Must have defined \'setup\' column first; try running set_configurations.')
configs = np.unique(self['setup'].data).tolist()
if 'None' in configs:
configs.remove('None') # Ignore frames with undefined configurations
n_cfg = len(configs)
# TODO: Science frames can only have one calibration group
# Assign everything from the same configuration to the same
# calibration group; this needs to have dtype=object, otherwise
# any changes to the strings will be truncated at 4 characters.
self.table['calib'] = np.full(len(self), 'None', dtype=object)
for i in range(n_cfg):
self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] = str(i)
# Allow some frame types to be used in all calibration groups
# (like biases and darks)
if global_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To set global frames, types must have been defined; '
'run get_frame_types.')
calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str))
for ftype in global_frames:
indx = np.where(self.find_frames(ftype))[0]
for i in indx:
self['calib'][i] = calibs
# Set the bits based on the string representation of the groups
self._set_calib_group_bits()
# Check that the groups are valid
self._check_calib_groups()
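    # Hypothetical end-to-end sketch (editor addition; the construction of
    # `fitstbl` and the exact constructor arguments are assumptions, not taken
    # from this file). The call order follows the error messages above:
    #
    #   fitstbl = PypeItMetaData(spectrograph, par, files=raw_files)
    #   fitstbl.get_frame_types(flag_unknown=True)
    #   fitstbl.set_configurations()
    #   fitstbl.set_calibration_groups(global_frames=['bias', 'dark'])
    #
    # after which 'setup', 'calib', and 'calibbit' are populated for each row.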
def find_frames(self, ftype, calib_ID=None, index=False):
"""
Find the rows with the associated frame type.
If the index is provided, the frames must also be matched to the
relevant science frame.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`. If
set to the string 'None', this returns all frames
without a known type.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
index (:obj:`bool`, optional):
Return an array of 0-indexed indices instead of a
boolean array.
Returns:
numpy.ndarray: A boolean array, or an integer array if
index=True, with the rows that contain the frames of the
requested type.
Raises:
PypeItError:
Raised if the `framebit` column is not set in the table.
"""
if 'framebit' not in self.keys():
msgs.error('Frame types are not set. First run get_frame_types.')
if ftype == 'None':
return self['framebit'] == 0
# Select frames
indx = self.type_bitmask.flagged(self['framebit'], ftype)
if calib_ID is not None:
# Select frames in the same calibration group
indx &= self.find_calib_group(calib_ID)
# Return
return np.where(indx)[0] if index else indx
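    # Usage sketch (editor addition, illustrative only): select the science
    # frames in calibration group 0 and fetch their full paths; `fitstbl` is
    # an instance of this class with frame types and calibration groups set.
    #
    #   is_sci = fitstbl.find_frames('science', calib_ID=0)
    #   sci_files = fitstbl.frame_paths(is_sci)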
def find_frame_files(self, ftype, calib_ID=None):
"""
Return the list of files with a given frame type.
The frames must also match the science frame index, if it is
provided.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
Returns:
list: List of file paths that match the frame type and
science frame ID, if the latter is provided.
"""
return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID))
def frame_paths(self, indx):
"""
Return the full paths to one or more frames.
Args:
indx (:obj:`int`, array-like):
One or more 0-indexed rows in the table with the frames
to return. Can be an array of indices or a boolean
array of the correct length.
Returns:
list: List of the full paths of one or more frames.
"""
if isinstance(indx, (int,np.integer)):
return os.path.join(self['directory'][indx], self['filename'][indx])
return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])]
def set_frame_types(self, type_bits, merge=True):
"""
Set and return a Table with the frame types and bits.
Args:
type_bits (numpy.ndarray):
Integer bitmask with the frame types. The length must
match the existing number of table rows.
merge (:obj:`bool`, optional):
Merge the types and bits into the existing table. This
will *overwrite* any existing columns.
Returns:
`astropy.table.Table`: Table with two columns, the frame
type name and bits.
"""
# Making Columns to pad string array
ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype')
# KLUDGE ME
#
# TODO: It would be good to get around this. Is it related to
# this change?
# http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3
#
# See also:
#
# http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode
#
# Or we can force type_names() in bitmask to always return the
# correct type...
if int(str(ftype_colmA.dtype)[2:]) < 9:
ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9',
name='frametype')
else:
ftype_colm = ftype_colmA
fbits_colm = table.Column(type_bits, name='framebit')
t = table.Table([ftype_colm, fbits_colm])
if merge:
self['frametype'] = t['frametype']
self['framebit'] = t['framebit']
return t
def edit_frame_type(self, indx, frame_type, append=False):
"""
Edit the frame type by hand.
Args:
indx (:obj:`int`):
The 0-indexed row in the table to edit
frame_type (:obj:`str`, :obj:`list`):
One or more frame types to append/overwrite.
append (:obj:`bool`, optional):
Append the frame type. If False, all existing frame
                types are overwritten by the provided type.
"""
if not append:
self['framebit'][indx] = 0
self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type)
self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx])
def get_frame_types(self, flag_unknown=False, user=None, merge=True):
"""
Generate a table of frame types from the input metadata object.
.. todo::
- Here's where we could add a SPIT option.
Args:
flag_unknown (:obj:`bool`, optional):
Instead of crashing out if there are unidentified files,
leave without a type and continue.
user (:obj:`dict`, optional):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
merge (:obj:`bool`, optional):
                Merge the frame typing into the existing table.
Returns:
:obj:`astropy.table.Table`: A Table with two columns, the
type names and the type bits. See
:class:`pypeit.core.framematch.FrameTypeBitMask` for the
allowed frame types.
"""
# Checks
if 'frametype' in self.keys() or 'framebit' in self.keys():
msgs.warn('Removing existing frametype and framebit columns.')
if 'frametype' in self.keys():
del self.table['frametype']
if 'framebit' in self.keys():
del self.table['framebit']
# # TODO: This needs to be moved into each Spectrograph
# if useIDname and 'idname' not in self.keys():
# raise ValueError('idname is not set in table; cannot use it for file typing.')
# Start
msgs.info("Typing files")
type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype())
# Use the user-defined frame types from the input dictionary
if user is not None:
if len(user.keys()) != len(self):
raise ValueError('The user-provided dictionary does not match table length.')
msgs.info('Using user-provided frame types.')
for ifile,ftypes in user.items():
indx = self['filename'] == ifile
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(','))
return self.set_frame_types(type_bits, merge=merge)
# Loop over the frame types
for i, ftype in enumerate(self.type_bitmask.keys()):
# # Initialize: Flag frames with the correct ID name or start by
# # flagging all as true
# indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \
# else np.ones(len(self), dtype=bool)
# Include a combination of instrument-specific checks using
# combinations of the full set of metadata
exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \
else self.par['calibrations']['{0}frame'.format(ftype)]['exprng']
# TODO: Use & or | ? Using idname above gets overwritten by
            # this if the frames meet the other checks in this call.
# indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
# Turn on the relevant bits
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype)
# Find the nearest standard star to each science frame
# TODO: Should this be 'standard' or 'science' or both?
if 'ra' not in self.keys() or 'dec' not in self.keys():
msgs.warn('Cannot associate standard with science frames without sky coordinates.')
else:
# TODO: Do we want to do this here?
indx = self.type_bitmask.flagged(type_bits, flag='standard')
for b, f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx],
self['dec'][indx]):
if ra == 'None' or dec == 'None':
msgs.warn('RA and DEC must not be None for file:' + msgs.newline() + f)
msgs.warn('The above file could be a twilight flat frame that was'
+ msgs.newline() + 'missed by the automatic identification.')
b = self.type_bitmask.turn_off(b, flag='standard')
continue
# If an object exists within 20 arcmins of a listed standard,
# then it is probably a standard star
foundstd = flux_calib.find_standard_file(ra, dec, check=True)
b = self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard')
# Find the files without any types
indx = np.logical_not(self.type_bitmask.flagged(type_bits))
if np.any(indx):
msgs.info("Couldn't identify the following files:")
for f in self['filename'][indx]:
msgs.info(f)
if not flag_unknown:
msgs.error("Check these files before continuing")
# Finish up (note that this is called above if user is not None!)
msgs.info("Typing completed!")
return self.set_frame_types(type_bits, merge=merge)
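    # Illustrative sketch (editor addition): automatic typing followed by a
    # hand edit of a single misidentified row; the row index is hypothetical.
    #
    #   fitstbl.get_frame_types(flag_unknown=True)
    #   fitstbl.edit_frame_type(3, ['arc', 'tilt'])   # overwrite row 3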
def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False):
"""
Generate the list of columns to be included in the fitstbl
(nearly the complete list).
Args:
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Returns:
`numpy.ndarray`_: Array of columns to be used in the fits
            table.
"""
# Columns for output
columns = self.spectrograph.pypeit_file_keys()
extras = []
# comb, bkg columns
if write_bkg_pairs:
extras += ['calib', 'comb_id', 'bkg_id']
# manual
if write_manual:
extras += ['manual']
for key in extras:
if key not in columns:
columns += [key]
# Take only those present
output_cols = np.array(columns)
return output_cols[np.isin(output_cols, self.keys())].tolist()
def set_combination_groups(self, assign_objects=True):
"""
Set combination groups.
.. note::
:attr:`table` is edited in place.
This function can be used to initialize the combination group
and background group columns, and/or to initialize the combination
groups to the set of objects (science or standard frames) to a
unique integer.
If the 'comb_id' or 'bkg_id' columns do not exist, they're set
to -1.
Args:
assign_objects (:obj:`bool`, optional):
                If all of the 'comb_id' values are less than 0 (meaning
they're unassigned), the combination groups are set to
be unique for each standard and science frame.
"""
if 'comb_id' not in self.keys():
self['comb_id'] = -1
if 'bkg_id' not in self.keys():
self['bkg_id'] = -1
if assign_objects and np.all(self['comb_id'] < 0):
# find_frames will throw an exception if framebit is not
# set...
sci_std_idx = np.where(np.any([self.find_frames('science'),
self.find_frames('standard')], axis=0))[0]
self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1
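    # Editor's note (illustrative): with the defaults, each science or
    # standard frame gets a unique positive 'comb_id' (1, 2, ...) while
    # 'bkg_id' stays at -1, e.g.
    #
    #   fitstbl.set_combination_groups()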
def set_user_added_columns(self):
"""
Set columns that the user *might* add
.. note::
:attr:`table` is edited in place.
This function can be used to initialize columns
that the user might add
"""
if 'manual' not in self.keys():
self['manual'] = ''
def write_sorted(self, ofile, overwrite=True, ignore=None,
write_bkg_pairs=False, write_manual=False):
"""
Write the sorted file.
The sorted file lists all the unique instrument configurations
(setups) and the frames associated with each configuration. The
output data table is identical to the pypeit file output.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot write sorted instrument configuration table without \'setup\' '
'column; run set_configurations.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
cfgs = self.unique_configurations(copy=ignore is not None)
if ignore is not None:
for key in cfgs.keys():
if key in ignore:
del cfgs[key]
# Construct file
ff = open(ofile, 'w')
for setup in cfgs.keys():
# Get the subtable of frames taken in this configuration
indx = self['setup'] == setup
if not np.any(indx):
continue
subtbl = self.table[output_cols][indx]
# Write the file
ff.write('##########################################################\n')
ff.write('Setup {:s}\n'.format(setup))
ff.write('\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\n')
ff.write('#---------------------------------------------------------\n')
mjd = subtbl['mjd'].copy()
# Deal with possibly None mjds if there were corrupt header cards
mjd[mjd == None] = -99999.0
isort = np.argsort(mjd)
subtbl = subtbl[isort]
subtbl.write(ff, format='ascii.fixed_width')
ff.write('##end\n')
ff.close()
# TODO: Do we need a calib file?
def write_calib(self, ofile, overwrite=True, ignore=None):
"""
Write the calib file.
The calib file provides the unique instrument configurations
(setups) and the association of each frame from that
configuration with a given calibration group.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
- This is complicated by allowing some frame types to have
no association with an instrument configuration
- This is primarily used for QA now; but could probably use the pypeit file instead
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore calibration groups in the provided list.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns haven't been
defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot write calibration groups without \'setup\' and \'calibbit\' '
'columns; run set_configurations and set_calibration_groups.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Construct the setups dictionary
cfg = self.unique_configurations(copy=True, rm_none=True)
# TODO: We should edit the relevant follow-on code so that we
# don't have to do these gymnastics. Or better yet, just stop
# producing/using the *.calib file.
_cfg = {}
for setup in cfg.keys():
_cfg[setup] = {}
_cfg[setup]['--'] = deepcopy(cfg[setup])
cfg = _cfg
# Iterate through the calibration bit names as these are the root of the
# MasterFrames and QA
for icbit in np.unique(self['calibbit'].data):
cbit = int(icbit) # for yaml
# Skip this group
if ignore is not None and cbit in ignore:
continue
# Find the frames in this group
#in_group = self.find_calib_group(i)
in_cbit = self['calibbit'] == cbit
# Find the unique configurations in this group, ignoring any
# undefined ('None') configurations
#setup = np.unique(self['setup'][in_group]).tolist()
setup = np.unique(self['setup'][in_cbit]).tolist()
if 'None' in setup:
setup.remove('None')
# Make sure that each calibration group should only contain
# frames from a single configuration
if len(setup) != 1:
msgs.error('Each calibration group must be from one and only one instrument '
'configuration with a valid letter identifier; i.e., the '
'configuration cannot be None.')
# Find the frames of each type in this group
cfg[setup[0]][cbit] = {}
for key in self.type_bitmask.keys():
#ftype_in_group = self.find_frames(key) & in_group
ftype_in_group = self.find_frames(key) & in_cbit
cfg[setup[0]][cbit][key] = [ os.path.join(d,f)
for d,f in zip(self['directory'][ftype_in_group],
self['filename'][ftype_in_group])]
# Write it
ff = open(ofile, 'w')
ff.write(yaml.dump(utils.yamlify(cfg)))
ff.close()
def write_pypeit(self, output_path=None, cfg_lines=None,
write_bkg_pairs=False, write_manual=False,
configs=None):
"""
Write a pypeit file in data-table format.
The pypeit file is the main configuration file for PypeIt,
configuring the control-flow and algorithmic parameters and
listing the data files to read. This function writes the
columns selected by the
:func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`,
which can be specific to each instrument.
Args:
output_path (:obj:`str`, optional):
Root path for the output pypeit files. If None, set
to current directory. If the output directory does
not exist, it is created.
cfg_lines (:obj:`list`, optional):
The list of configuration lines to include in the file.
If None are provided, the vanilla configuration is
included.
write_bkg_pairs (:obj:`bool`, optional):
When constructing the
:class:`pypeit.metadata.PypeItMetaData` object, include
two columns called `comb_id` and `bkg_id` that identify
object and background frame pairs.
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']). See
:attr:`configs`.
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
Returns:
:obj:`list`: List of ``PypeIt`` files generated.
"""
# Set output path
if output_path is None:
output_path = os.getcwd()
# Find unique configurations, always ignoring any 'None'
# configurations...
cfg = self.unique_configurations(copy=True, rm_none=True)
# Get the setups to write
if configs is None or configs == 'all' or configs == ['all']:
cfg_keys = list(cfg.keys())
else:
_configs = configs if isinstance(configs, list) else [configs]
cfg_keys = [key for key in cfg.keys() if key in _configs]
if len(cfg_keys) == 0:
msgs.error('No setups to write!')
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
# Write the pypeit files
ofiles = [None]*len(cfg_keys)
for j,setup in enumerate(cfg_keys):
# Create the output directory
root = '{0}_{1}'.format(self.spectrograph.name, setup)
odir = os.path.join(output_path, root)
if not os.path.isdir(odir):
os.makedirs(odir)
# Create the output file name
ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root))
# Get the setup lines
setup_lines = dict_to_lines({'Setup {0}'.format(setup):
utils.yamlify(cfg[setup])}, level=1)
# Get the paths
in_cfg = self['setup'] == setup
if not np.any(in_cfg):
continue
paths = np.unique(self['directory'][in_cfg]).tolist()
# Get the data lines
subtbl = self.table[output_cols][in_cfg]
subtbl.sort(['frametype','filename'])
with io.StringIO() as ff:
subtbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
# Write the file
make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines,
setup_lines=setup_lines, sorted_files=data_lines, paths=paths)
# Return
return ofiles
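    # Hypothetical call (editor addition; the output path is made up):
    #
    #   pypeit_files = fitstbl.write_pypeit(output_path='reduce', write_bkg_pairs=True)
    #
    # which writes one <spectrograph>_<setup>/<spectrograph>_<setup>.pypeit
    # file per configuration and returns the list of file names.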
def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False,
header=None):
"""
Write the metadata either to a file or to the screen.
The method allows you to set the columns to print and which column to
use for sorting.
Args:
output (:obj:`str`, optional):
Output signature or file name. If None, the table contents
are printed to the screen. If ``'table'``, the table that
would have been printed/written to disk is returned.
Otherwise, the string is interpreted as the name of an ascii
file to which to write the table contents.
rows (`numpy.ndarray`_, optional):
A boolean vector selecting the rows of the table to write. If
None, all rows are written. Shape must match the number of
the rows in the table.
columns (:obj:`str`, :obj:`list`, optional):
A list of columns to include in the output file. Can be
provided as a list directly or as a comma-separated string.
                If None or ``'all'``, all columns in the table are written; if
``'pypeit'``, the columns are the same as those included in
the pypeit file. Each selected column must be a valid pypeit
metadata keyword, specific to :attr:`spectrograph`.
Additional valid keywords, depending on the processing level
of the metadata table, are directory, filename, frametype,
framebit, setup, calib, and calibbit.
sort_col (:obj:`str`, optional):
Name of the column to use for sorting the output. If
None, the table is printed in its current state.
overwrite (:obj:`bool`, optional):
Overwrite any existing file; otherwise raise an
exception.
header (:obj:`str`, :obj:`list`, optional):
                One or more strings to write to the top of the file, one
string per file line; ``# `` is added to the beginning of
each string. Ignored if ``output`` does not specify an output
file.
Returns:
`astropy.table.Table`: The table object that would have been
written/printed if ``output == 'table'``. Otherwise, the method
always returns None.
Raises:
ValueError:
Raised if the columns to include are not valid, or if the
column to use for sorting is not valid.
FileExistsError:
Raised if overwrite is False and the file exists.
"""
# Check the file can be written (this is here because the spectrograph
# needs to be defined first)
ofile = None if output in [None, 'table'] else output
if ofile is not None and os.path.isfile(ofile) and not overwrite:
raise FileExistsError(f'{ofile} already exists; set flag to overwrite.')
# Check the rows input
if rows is not None and len(rows) != len(self.table):
raise ValueError('Boolean vector selecting output rows has incorrect length.')
# Get the columns to return
if columns in [None, 'all']:
tbl_cols = list(self.keys())
elif columns == 'pypeit':
tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True)
else:
all_cols = list(self.keys())
tbl_cols = columns if isinstance(columns, list) else columns.split(',')
badcol = [col not in all_cols for col in tbl_cols]
if np.any(badcol):
raise ValueError('The following columns are not valid: {0}'.format(
', '.join(tbl_cols[badcol])))
# Make sure the basic parameters are the first few columns; do them in
# reverse order so I can always insert at the beginning of the list
for col in ['framebit', 'frametype', 'filename', 'directory']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != 0:
tbl_cols.insert(0, tbl_cols.pop(indx))
# Make sure the dithers and combination and background IDs are the last
# few columns
ncol = len(tbl_cols)
for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != ncol-1:
tbl_cols.insert(ncol-1, tbl_cols.pop(indx))
# Copy the internal table so that it is unaltered
output_tbl = self.table.copy()
# Select the output rows if a vector was provided
if rows is not None:
output_tbl = output_tbl[rows]
# Select and sort the data by a given column
if sort_col is not None:
if sort_col not in self.keys():
raise ValueError(f'Cannot sort by {sort_col}. Not a valid column.')
# Ignore any NoneTypes
indx = output_tbl[sort_col] != None
is_None = np.logical_not(indx)
srt = np.append(np.where(is_None)[0],
np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)])
output_tbl = output_tbl[tbl_cols][srt]
else:
output_tbl = output_tbl[tbl_cols]
if output == 'table':
# Instead of writing, just return the modified table
return output_tbl
# Always write the table in ascii format
with io.StringIO() as ff:
output_tbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
if ofile is None:
# Output file not defined so just print it
print('\n'.join(data_lines))
return None
# Write the output to an ascii file
with open(ofile, 'w') as f:
if header is not None:
_header = header if isinstance(header, list) else [header]
for h in _header:
f.write(f'# {h}\n')
f.write('\n')
f.write('\n'.join(data_lines))
f.write('\n')
# Just to be explicit that the method returns None when writing to a
# file...
return None
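    # Usage sketch (editor addition): print a sorted view to the screen, or
    # get the trimmed table back instead of writing a file.
    #
    #   fitstbl.write(sort_col='mjd')
    #   tbl = fitstbl.write(output='table', columns='pypeit')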
def find_calib_group(self, grp):
"""
Find all the frames associated with the provided calibration group.
Args:
grp (:obj:`int`):
The calibration group integer.
Returns:
numpy.ndarray: Boolean array selecting those frames in the
table included in the selected calibration group.
Raises:
PypeItError:
Raised if the 'calibbit' column is not defined.
"""
if 'calibbit' not in self.keys():
msgs.error('Calibration groups are not set. First run set_calibration_groups.')
return self.calib_bitmask.flagged(self['calibbit'].data, grp)
def find_frame_calib_groups(self, row):
"""
Find the calibration groups associated with a specific frame.
"""
return self.calib_bitmask.flagged_bits(self['calibbit'][row])
# TODO: Is there a reason why this is not an attribute of
# PypeItMetaData?
def row_match_config(row, config, spectrograph):
"""
Queries whether a row from the fitstbl matches the
input configuration
Args:
row (astropy.table.Row): From fitstbl
config (dict): Defines the configuration
spectrograph (pypeit.spectrographs.spectrograph.Spectrograph):
Used to grab the rtol value for float meta (e.g. dispangle)
Returns:
bool: True if the row matches the input configuration
"""
# Loop on keys in config
match = []
for k in config.keys():
# Deal with floating configs (e.g. grating angle)
if isinstance(config[k], float):
if row[k] is None:
match.append(False)
elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']:
match.append(True)
else:
match.append(False)
else:
# The np.all allows for arrays in the Table (e.g. binning)
match.append(np.all(config[k] == row[k]))
# Check
return np.all(match)
| [((1837, 11, 1837, 24), 'numpy.all', 'np.all', ({(1837, 18, 1837, 23): 'match'}, {}), '(match)', True, 'import numpy as np\n'), ((111, 28, 111, 57), 'pypeit.core.framematch.FrameTypeBitMask', 'framematch.FrameTypeBitMask', ({}, {}), '()', False, 'from pypeit.core import framematch\n'), ((292, 26, 292, 52), 'pypeit.core.meta.get_meta_data_model', 'meta.get_meta_data_model', ({}, {}), '()', False, 'from pypeit.core import meta\n'), ((459, 15, 459, 56), 'astropy.time.Time', 'time.Time', (), '', False, 'from astropy import table, coordinates, time, units\n'), ((476, 15, 476, 49), 'astropy.time.Time', 'time.Time', (), '', False, 'from astropy import table, coordinates, time, units\n'), ((477, 16, 477, 78), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(477, 43, 477, 53): 'tiso.value', (477, 55, 477, 77): '"""%Y-%m-%dT%H:%M:%S.%f"""'}, {}), "(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')", False, 'import datetime\n'), ((577, 23, 577, 66), 'numpy.unique', 'np.unique', (), '', True, 'import numpy as np\n'), ((691, 8, 691, 71), 'pypeit.msgs.info', 'msgs.info', ({(691, 18, 691, 70): '"""Using metadata to determine unique configurations."""'}, {}), "('Using metadata to determine unique configurations.')", False, 'from pypeit import msgs\n'), ((889, 11, 889, 23), 'numpy.all', 'np.all', ({(889, 18, 889, 22): 'good'}, {}), '(good)', True, 'import numpy as np\n'), ((900, 8, 900, 22), 'pypeit.msgs.warn', 'msgs.warn', ({(900, 18, 900, 21): 'msg'}, {}), '(msg)', False, 'from pypeit import msgs\n'), ((1166, 21, 1166, 61), 'astropy.table.Column', 'table.Column', (), '', False, 'from astropy import table, coordinates, time, units\n'), ((1167, 12, 1167, 49), 'astropy.table.Table', 'table.Table', ({(1167, 24, 1167, 48): '[ftype_colm, fbits_colm]'}, {}), '([ftype_colm, fbits_colm])', False, 'from astropy import table, coordinates, time, units\n'), ((1233, 8, 1233, 33), 'pypeit.msgs.info', 'msgs.info', ({(1233, 18, 1233, 32): '"""Typing files"""'}, {}), "('Typing files')", False, 'from pypeit import msgs\n'), ((1288, 11, 1288, 23), 'numpy.any', 'np.any', ({(1288, 18, 1288, 22): 'indx'}, {}), '(indx)', True, 'import numpy as np\n'), ((1296, 8, 1296, 38), 'pypeit.msgs.info', 'msgs.info', ({(1296, 18, 1296, 37): '"""Typing completed!"""'}, {}), "('Typing completed!')", False, 'from pypeit import msgs\n'), ((1332, 22, 1332, 39), 'numpy.array', 'np.array', ({(1332, 31, 1332, 38): 'columns'}, {}), '(columns)', True, 'import numpy as np\n'), ((1500, 21, 1500, 53), 'numpy.unique', 'np.unique', ({(1500, 31, 1500, 52): "self['calibbit'].data"}, {}), "(self['calibbit'].data)", True, 'import numpy as np\n'), ((103, 12, 104, 51), 'pypeit.msgs.warn', 'msgs.warn', ({(103, 22, 104, 50): '"""Both data and files are None in the instantiation of PypeItMetaData. The table will be empty!"""'}, {}), "(\n 'Both data and files are None in the instantiation of PypeItMetaData. 
The table will be empty!'\n )", False, 'from pypeit import msgs\n'), ((192, 60, 192, 80), 'os.path.split', 'os.path.split', ({(192, 74, 192, 79): 'ifile'}, {}), '(ifile)', False, 'import os\n'), ((218, 12, 218, 48), 'astropy.time.Time', 'time.Time', (), '', False, 'from astropy import table, coordinates, time, units\n'), ((440, 12, 441, 76), 'pypeit.msgs.error', 'msgs.error', ({(440, 23, 441, 75): '"""Cannot provide master key string without setup and calibbit; run set_configurations and set_calibration_groups."""'}, {}), "(\n 'Cannot provide master key string without setup and calibbit; run set_configurations and set_calibration_groups.'\n )", False, 'from pypeit import msgs\n'), ((481, 43, 481, 87), 'datetime.datetime.strftime', 'datetime.datetime.strftime', ({(481, 70, 481, 75): 'dtime', (481, 77, 481, 86): '"""%Y%m%dT"""'}, {}), "(dtime, '%Y%m%dT')", False, 'import datetime\n'), ((512, 12, 513, 49), 'pypeit.msgs.error', 'msgs.error', ({(512, 23, 513, 48): '"""Cannot provide instrument setup without \'setup\' column; run set_configurations."""'}, {}), '(\n "Cannot provide instrument setup without \'setup\' column; run set_configurations."\n )', False, 'from pypeit import msgs\n'), ((574, 12, 574, 73), 'pypeit.msgs.error', 'msgs.error', ({(574, 23, 574, 72): '"""Cannot get setup names; run set_configurations."""'}, {}), "('Cannot get setup names; run set_configurations.')", False, 'from pypeit import msgs\n'), ((586, 48, 586, 70), 'numpy.atleast_1d', 'np.atleast_1d', ({(586, 62, 586, 69): 'configs'}, {}), '(configs)', True, 'import numpy as np\n'), ((591, 18, 591, 43), 'numpy.isin', 'np.isin', ({(591, 26, 591, 32): 'setups', (591, 34, 591, 42): '_configs'}, {}), '(setups, _configs)', True, 'import numpy as np\n'), ((618, 15, 618, 37), 'copy.deepcopy', 'deepcopy', ({(618, 24, 618, 36): 'self.configs'}, {}), '(self.configs)', False, 'from copy import deepcopy\n'), ((677, 12, 677, 82), 'pypeit.msgs.info', 'msgs.info', ({(677, 22, 677, 81): '"""Setup column already set. Finding unique configurations."""'}, {}), "('Setup column already set. 
Finding unique configurations.')", False, 'from pypeit import msgs\n'), ((678, 25, 678, 68), 'numpy.unique', 'np.unique', (), '', True, 'import numpy as np\n'), ((707, 12, 707, 68), 'pypeit.msgs.error', 'msgs.error', ({(707, 23, 707, 67): '"""No frames to use to define configurations!"""'}, {}), "('No frames to use to define configurations!')", False, 'from pypeit import msgs\n'), ((723, 12, 723, 77), 'pypeit.msgs.info', 'msgs.info', ({(723, 22, 723, 76): '"""All files assumed to be from a single configuration."""'}, {}), "('All files assumed to be from a single configuration.')", False, 'from pypeit import msgs\n'), ((807, 15, 807, 32), 'numpy.any', 'np.any', ({(807, 22, 807, 31): 'not_setup'}, {}), '(not_setup)', True, 'import numpy as np\n'), ((819, 12, 820, 42), 'pypeit.msgs.error', 'msgs.error', ({(819, 23, 820, 41): '"""To account for ignored frames, types must have been defined; run get_frame_types."""'}, {}), "(\n 'To account for ignored frames, types must have been defined; run get_frame_types.'\n )", False, 'from pypeit import msgs\n'), ((884, 19, 884, 54), 'numpy.isin', 'np.isin', ({(884, 27, 884, 36): 'self[key]', (884, 38, 884, 53): 'cfg_limits[key]'}, {}), '(self[key], cfg_limits[key])', True, 'import numpy as np\n'), ((924, 37, 924, 55), 'numpy.arange', 'np.arange', ({(924, 47, 924, 54): 'ngroups'}, {}), '(ngroups)', True, 'import numpy as np\n'), ((930, 18, 930, 59), 'pypeit.core.parse.str2list', 'parse.str2list', ({(930, 33, 930, 49): "self['calib'][i]", (930, 51, 930, 58): 'ngroups'}, {}), "(self['calib'][i], ngroups)", False, 'from pypeit.core import parse\n'), ((1015, 12, 1015, 99), 'pypeit.msgs.error', 'msgs.error', ({(1015, 23, 1015, 98): '"""Must have defined \'setup\' column first; try running set_configurations."""'}, {}), '(\n "Must have defined \'setup\' column first; try running set_configurations.")', False, 'from pypeit import msgs\n'), ((1078, 12, 1078, 78), 'pypeit.msgs.error', 'msgs.error', ({(1078, 23, 1078, 77): '"""Frame types are not set. First run get_frame_types."""'}, {}), "('Frame types are not set. 
First run get_frame_types.')", False, 'from pypeit import msgs\n'), ((1125, 19, 1125, 80), 'os.path.join', 'os.path.join', ({(1125, 32, 1125, 55): "self['directory'][indx]", (1125, 57, 1125, 79): "self['filename'][indx]"}, {}), "(self['directory'][indx], self['filename'][indx])", False, 'import os\n'), ((1126, 16, 1126, 33), 'os.path.join', 'os.path.join', ({(1126, 29, 1126, 30): 'd', (1126, 31, 1126, 32): 'f'}, {}), '(d, f)', False, 'import os\n'), ((1222, 12, 1222, 74), 'pypeit.msgs.warn', 'msgs.warn', ({(1222, 22, 1222, 73): '"""Removing existing frametype and framebit columns."""'}, {}), "('Removing existing frametype and framebit columns.')", False, 'from pypeit import msgs\n'), ((1240, 12, 1240, 57), 'pypeit.msgs.info', 'msgs.info', ({(1240, 22, 1240, 56): '"""Using user-provided frame types."""'}, {}), "('Using user-provided frame types.')", False, 'from pypeit import msgs\n'), ((1268, 12, 1268, 95), 'pypeit.msgs.warn', 'msgs.warn', ({(1268, 22, 1268, 94): '"""Cannot associate standard with science frames without sky coordinates."""'}, {}), "(\n 'Cannot associate standard with science frames without sky coordinates.')", False, 'from pypeit import msgs\n'), ((1289, 12, 1289, 63), 'pypeit.msgs.info', 'msgs.info', ({(1289, 22, 1289, 62): '"""Couldn\'t identify the following files:"""'}, {}), '("Couldn\'t identify the following files:")', False, 'from pypeit import msgs\n'), ((1360, 30, 1360, 57), 'numpy.all', 'np.all', ({(1360, 37, 1360, 56): "(self['comb_id'] < 0)"}, {}), "(self['comb_id'] < 0)", True, 'import numpy as np\n'), ((1412, 12, 1413, 57), 'pypeit.msgs.error', 'msgs.error', ({(1412, 23, 1413, 56): '"""Cannot write sorted instrument configuration table without \'setup\' column; run set_configurations."""'}, {}), '(\n "Cannot write sorted instrument configuration table without \'setup\' column; run set_configurations."\n )', False, 'from pypeit import msgs\n'), ((1415, 11, 1415, 32), 'os.path.isfile', 'os.path.isfile', ({(1415, 26, 1415, 31): 'ofile'}, {}), '(ofile)', False, 'import os\n'), ((1444, 20, 1444, 35), 'numpy.argsort', 'np.argsort', ({(1444, 31, 1444, 34): 'mjd'}, {}), '(mjd)', True, 'import numpy as np\n'), ((1480, 12, 1481, 85), 'pypeit.msgs.error', 'msgs.error', ({(1480, 23, 1481, 84): '"""Cannot write calibration groups without \'setup\' and \'calibbit\' columns; run set_configurations and set_calibration_groups."""'}, {}), '(\n "Cannot write calibration groups without \'setup\' and \'calibbit\' columns; run set_configurations and set_calibration_groups."\n )', False, 'from pypeit import msgs\n'), ((1483, 11, 1483, 32), 'os.path.isfile', 'os.path.isfile', ({(1483, 26, 1483, 31): 'ofile'}, {}), '(ofile)', False, 'import os\n'), ((1495, 32, 1495, 52), 'copy.deepcopy', 'deepcopy', ({(1495, 41, 1495, 51): 'cfg[setup]'}, {}), '(cfg[setup])', False, 'from copy import deepcopy\n'), ((1584, 26, 1584, 37), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((1598, 12, 1598, 45), 'pypeit.msgs.error', 'msgs.error', ({(1598, 23, 1598, 44): '"""No setups to write!"""'}, {}), "('No setups to write!')", False, 'from pypeit import msgs\n'), ((1609, 19, 1609, 50), 'os.path.join', 'os.path.join', ({(1609, 32, 1609, 43): 'output_path', (1609, 45, 1609, 49): 'root'}, {}), '(output_path, root)', False, 'import os\n'), ((1629, 12, 1630, 91), 'pypeit.par.util.make_pypeit_file', 'make_pypeit_file', (), '', False, 'from pypeit.par.util import make_pypeit_file\n'), ((1691, 33, 1691, 54), 'os.path.isfile', 'os.path.isfile', ({(1691, 48, 1691, 53): 'ofile'}, {}), '(ofile)', 
False, 'import os\n'), ((1743, 22, 1743, 42), 'numpy.logical_not', 'np.logical_not', ({(1743, 37, 1743, 41): 'indx'}, {}), '(indx)', True, 'import numpy as np\n'), ((1755, 13, 1755, 26), 'io.StringIO', 'io.StringIO', ({}, {}), '()', False, 'import io\n'), ((1795, 12, 1795, 92), 'pypeit.msgs.error', 'msgs.error', ({(1795, 23, 1795, 91): '"""Calibration groups are not set. First run set_calibration_groups."""'}, {}), "('Calibration groups are not set. First run set_calibration_groups.'\n )", False, 'from pypeit import msgs\n'), ((220, 18, 220, 41), 'numpy.asarray', 'np.asarray', ({(220, 29, 220, 40): "data['mjd']"}, {}), "(data['mjd'])", True, 'import numpy as np\n'), ((221, 24, 221, 52), 'numpy.asarray', 'np.asarray', ({(221, 35, 221, 51): "data['filename']"}, {}), "(data['filename'])", True, 'import numpy as np\n'), ((228, 12, 228, 26), 'pypeit.msgs.warn', 'msgs.warn', ({(228, 22, 228, 25): 'msg'}, {}), '(msg)', False, 'from pypeit import msgs\n'), ((581, 32, 581, 55), 'numpy.isin', 'np.isin', ({(581, 40, 581, 46): 'setups', (581, 48, 581, 54): 'ignore'}, {}), '(setups, ignore)', True, 'import numpy as np\n'), ((680, 15, 680, 29), 'numpy.sum', 'np.sum', ({(680, 22, 680, 28): 'ignore'}, {}), '(ignore)', True, 'import numpy as np\n'), ((699, 16, 699, 98), 'pypeit.msgs.error', 'msgs.error', ({(699, 27, 699, 97): '"""To ignore frames, types must have been defined; run get_frame_types."""'}, {}), "(\n 'To ignore frames, types must have been defined; run get_frame_types.')", False, 'from pypeit import msgs\n'), ((847, 28, 847, 71), 'numpy.unique', 'np.unique', ({(847, 38, 847, 70): 'self.table[metakey][in_cfg].data'}, {}), '(self.table[metakey][in_cfg].data)', True, 'import numpy as np\n'), ((856, 24, 856, 63), 'numpy.isin', 'np.isin', ({(856, 32, 856, 51): 'self.table[metakey]', (856, 53, 856, 62): 'uniq_meta'}, {}), '(self.table[metakey], uniq_meta)', True, 'import numpy as np\n'), ((885, 19, 885, 31), 'numpy.all', 'np.all', ({(885, 26, 885, 30): 'indx'}, {}), '(indx)', True, 'import numpy as np\n'), ((897, 24, 897, 44), 'numpy.logical_not', 'np.logical_not', ({(897, 39, 897, 43): 'good'}, {}), '(good)', True, 'import numpy as np\n'), ((952, 16, 952, 96), 'pypeit.msgs.error', 'msgs.error', ({(952, 27, 952, 95): '"""Science frames can only be assigned to a single calibration group."""'}, {}), "('Science frames can only be assigned to a single calibration group.'\n )", False, 'from pypeit import msgs\n'), ((1016, 18, 1016, 47), 'numpy.unique', 'np.unique', ({(1016, 28, 1016, 46): "self['setup'].data"}, {}), "(self['setup'].data)", True, 'import numpy as np\n'), ((1034, 16, 1035, 50), 'pypeit.msgs.error', 'msgs.error', ({(1034, 27, 1035, 49): '"""To set global frames, types must have been defined; run get_frame_types."""'}, {}), "(\n 'To set global frames, types must have been defined; run get_frame_types.')", False, 'from pypeit import msgs\n'), ((1089, 15, 1089, 29), 'numpy.where', 'np.where', ({(1089, 24, 1089, 28): 'indx'}, {}), '(indx)', True, 'import numpy as np\n'), ((1283, 27, 1283, 77), 'pypeit.core.flux_calib.find_standard_file', 'flux_calib.find_standard_file', (), '', False, 'from pypeit.core import flux_calib\n'), ((1291, 16, 1291, 28), 'pypeit.msgs.info', 'msgs.info', ({(1291, 26, 1291, 27): 'f'}, {}), '(f)', False, 'from pypeit import msgs\n'), ((1293, 16, 1293, 65), 'pypeit.msgs.error', 'msgs.error', ({(1293, 27, 1293, 64): '"""Check these files before continuing"""'}, {}), "('Check these files before continuing')", False, 'from pypeit import msgs\n'), ((1433, 19, 1433, 31), 
'numpy.any', 'np.any', ({(1433, 26, 1433, 30): 'indx'}, {}), '(indx)', True, 'import numpy as np\n'), ((1520, 16, 1522, 59), 'pypeit.msgs.error', 'msgs.error', ({(1520, 27, 1522, 58): '"""Each calibration group must be from one and only one instrument configuration with a valid letter identifier; i.e., the configuration cannot be None."""'}, {}), "(\n 'Each calibration group must be from one and only one instrument configuration with a valid letter identifier; i.e., the configuration cannot be None.'\n )", False, 'from pypeit import msgs\n'), ((1534, 27, 1534, 45), 'pypeit.utils.yamlify', 'utils.yamlify', ({(1534, 41, 1534, 44): 'cfg'}, {}), '(cfg)', False, 'from pypeit import utils\n'), ((1610, 19, 1610, 38), 'os.path.isdir', 'os.path.isdir', ({(1610, 33, 1610, 37): 'odir'}, {}), '(odir)', False, 'import os\n'), ((1611, 16, 1611, 33), 'os.makedirs', 'os.makedirs', ({(1611, 28, 1611, 32): 'odir'}, {}), '(odir)', False, 'import os\n'), ((1619, 19, 1619, 33), 'numpy.any', 'np.any', ({(1619, 26, 1619, 32): 'in_cfg'}, {}), '(in_cfg)', True, 'import numpy as np\n'), ((1625, 17, 1625, 30), 'io.StringIO', 'io.StringIO', ({}, {}), '()', False, 'import io\n'), ((1707, 15, 1707, 29), 'numpy.any', 'np.any', ({(1707, 22, 1707, 28): 'badcol'}, {}), '(badcol)', True, 'import numpy as np\n'), ((1835, 25, 1835, 52), 'numpy.all', 'np.all', ({(1835, 32, 1835, 51): '(config[k] == row[k])'}, {}), '(config[k] == row[k])', True, 'import numpy as np\n'), ((186, 19, 186, 42), 'os.path.basename', 'os.path.basename', ({(186, 36, 186, 41): 'ifile'}, {}), '(ifile)', False, 'import os\n'), ((187, 20, 188, 86), 'pypeit.msgs.error', 'msgs.error', ({(187, 31, 188, 85): '"""File name list does not match user-provided metadata table. See usrdata argument of instantiation of PypeItMetaData."""'}, {}), "(\n 'File name list does not match user-provided metadata table. 
See usrdata argument of instantiation of PypeItMetaData.'\n )", False, 'from pypeit import msgs\n'), ((300, 15, 300, 52), 'numpy.where', 'np.where', ({(300, 24, 300, 51): "(f == self.table['filename'])"}, {}), "(f == self.table['filename'])", True, 'import numpy as np\n'), ((321, 32, 322, 79), 'pypeit.core.meta.convert_radec', 'meta.convert_radec', ({(321, 51, 321, 77): "usrdata['ra'][~nones].data", (322, 51, 322, 78): "usrdata['dec'][~nones].data"}, {}), "(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data)", False, 'from pypeit.core import meta\n'), ((838, 23, 838, 35), 'numpy.any', 'np.any', ({(838, 30, 838, 34): 'indx'}, {}), '(indx)', True, 'import numpy as np\n'), ((1513, 20, 1513, 53), 'numpy.unique', 'np.unique', ({(1513, 30, 1513, 52): "self['setup'][in_cbit]"}, {}), "(self['setup'][in_cbit])", True, 'import numpy as np\n'), ((1529, 45, 1529, 62), 'os.path.join', 'os.path.join', ({(1529, 58, 1529, 59): 'd', (1529, 60, 1529, 61): 'f'}, {}), '(d, f)', False, 'import os\n'), ((1616, 16, 1616, 41), 'pypeit.utils.yamlify', 'utils.yamlify', ({(1616, 30, 1616, 40): 'cfg[setup]'}, {}), '(cfg[setup])', False, 'from pypeit import utils\n'), ((1621, 20, 1621, 56), 'numpy.unique', 'np.unique', ({(1621, 30, 1621, 55): "self['directory'][in_cfg]"}, {}), "(self['directory'][in_cfg])", True, 'import numpy as np\n'), ((1716, 19, 1716, 57), 'numpy.where', 'np.where', ({(1716, 28, 1716, 56): '[(t == col) for t in tbl_cols]'}, {}), '([(t == col) for t in tbl_cols])', True, 'import numpy as np\n'), ((1726, 19, 1726, 57), 'numpy.where', 'np.where', ({(1726, 28, 1726, 56): '[(t == col) for t in tbl_cols]'}, {}), '([(t == col) for t in tbl_cols])', True, 'import numpy as np\n'), ((1744, 28, 1744, 45), 'numpy.where', 'np.where', ({(1744, 37, 1744, 44): 'is_None'}, {}), '(is_None)', True, 'import numpy as np\n'), ((210, 54, 210, 74), 'os.path.split', 'os.path.split', ({(210, 68, 210, 73): 'ifile'}, {}), '(ifile)', False, 'import os\n'), ((682, 28, 682, 42), 'numpy.sum', 'np.sum', ({(682, 35, 682, 41): 'ignore'}, {}), '(ignore)', True, 'import numpy as np\n'), ((1439, 31, 1439, 66), 'pypeit.io.dict_to_lines', 'dict_to_lines', (), '', False, 'from pypeit.io import dict_to_lines\n'), ((1745, 28, 1745, 42), 'numpy.where', 'np.where', ({(1745, 37, 1745, 41): 'indx'}, {}), '(indx)', True, 'import numpy as np\n'), ((1745, 46, 1745, 89), 'numpy.argsort', 'np.argsort', ({(1745, 57, 1745, 88): 'output_tbl[sort_col][indx].data'}, {}), '(output_tbl[sort_col][indx].data)', True, 'import numpy as np\n'), ((1829, 17, 1829, 41), 'numpy.abs', 'np.abs', ({(1829, 24, 1829, 40): '(config[k] - row[k])'}, {}), '(config[k] - row[k])', True, 'import numpy as np\n'), ((1037, 53, 1037, 69), 'numpy.arange', 'np.arange', ({(1037, 63, 1037, 68): 'n_cfg'}, {}), '(n_cfg)', True, 'import numpy as np\n'), ((1275, 72, 1275, 86), 'pypeit.msgs.newline', 'msgs.newline', ({}, {}), '()', False, 'from pypeit import msgs\n'), ((1277, 32, 1277, 46), 'pypeit.msgs.newline', 'msgs.newline', ({}, {}), '()', False, 'from pypeit import msgs\n')] |
menify/sandbox | aql/aql/main/aql_builtin_tools.py | 32166c71044f0d5b414335b2b6559adc571f568c |
import os.path
import shutil
import errno
from aql.nodes import Builder, FileBuilder
from .aql_tools import Tool
__all__ = ( "ExecuteCommand",
"InstallBuilder",
"BuiltinTool",
)
"""
Unique Value - name + type
value
node
node = ExecuteCommand('gcc --help -v')
tools.cpp.cxx
node = ExecuteCommand( tools.cpp.cxx, '--help -v' )
node = ExecuteMethod( target = my_function )
dir_node = CopyFiles( prog_node, target = dir_name )
dir_node = CopyFilesAs( prog_node, target = dir_name )
dir_node = MoveFiles( prog_node, )
dir_node = MoveFilesAs( prog_node )
dir_node = RemoveFiles( prog_node )
node = FindFiles( dir_node )
dir_node = FileDir( prog_node )
"""
def _makeTagetDirs( path_dir ):
try:
os.makedirs( path_dir )
except OSError as e:
if e.errno != errno.EEXIST:
raise
#//===========================================================================//
class ExecuteCommand (Builder):
def build( self, node ):
cmd = node.getSources()
out = self.execCmd( cmd )
node.setNoTargets()
return out
#//-------------------------------------------------------//
def getBuildStrArgs( self, node, brief ):
cmd = node.getSourceValues()
return (cmd,)
#//===========================================================================//
class InstallBuilder (FileBuilder):
def __init__(self, options, target ):
self.target = os.path.abspath( target )
#//-------------------------------------------------------//
def build( self, node ):
sources = node.getSources()
target = self.target
_makeTagetDirs( target )
for source in sources:
if os.path.isfile( source ):
shutil.copy( source, target )
node.setNoTargets()
#//-------------------------------------------------------//
def getTraceTargets( self, node, brief ):
return self.target
#//===========================================================================//
class BuiltinTool( Tool ):
def ExecuteCommand( self, options ):
return ExecuteCommand( options )
def Install(self, options, target ):
return InstallBuilder( options, target )
def DirName(self, options):
raise NotImplementedError()
def BaseName(self, options):
raise NotImplementedError()
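#//===========================================================================//
# Editor's illustrative sketch (not part of the original module): how the two
# factory methods above might be used from a build script. How `options` is
# obtained, and the BuiltinTool constructor signature, are assumptions; only
# ExecuteCommand() and Install() come from this file.
#
#   tool = BuiltinTool( options )
#   run_help = tool.ExecuteCommand( options )
#   copy_to_bin = tool.Install( options, 'install/bin' )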
| [((80, 8, 80, 37), 'shutil.copy', 'shutil.copy', ({(80, 21, 80, 27): 'source', (80, 29, 80, 35): 'target'}, {}), '(source, target)', False, 'import shutil\n')] |
stefanw/django-cms | cms/test_utils/project/placeholderapp/models.py | 048ec9e7a529549d51f4805fdfbcd50ea1e624b0 | from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from cms.models.fields import PlaceholderField
from cms.utils import get_language_from_request
from cms.utils.urlutils import admin_reverse
from hvad.models import TranslatableModel, TranslatedFields
def dynamic_placeholder_1(instance):
return instance.char_1
def dynamic_placeholder_2(instance):
return instance.char_2
@python_2_unicode_compatible
class Example1(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
char_3 = models.CharField(u'char_3', max_length=255)
char_4 = models.CharField(u'char_4', max_length=255)
date_field = models.DateField(null=True)
placeholder = PlaceholderField('placeholder')
static_admin_url = ''
def __init__(self, *args, **kwargs):
super(Example1, self).__init__(*args, **kwargs)
def callable_item(self, request):
return self.char_1
def __str__(self):
return self.char_1
def get_absolute_url(self):
return reverse("example_detail", args=(self.pk,))
def get_draft_url(self):
return self.get_absolute_url()
def get_public_url(self):
return '/public/view/'
def set_static_url(self, request):
language = get_language_from_request(request)
if self.pk:
self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
return self.pk
def dynamic_url(self, request):
language = get_language_from_request(request)
return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
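    # Editor's illustrative note (not part of the original model): in a
    # django CMS template the placeholder would typically be rendered with
    # something like {% render_placeholder instance.placeholder %} (after
    # {% load cms_tags %}), and get_absolute_url() assumes an 'example_detail'
    # URL pattern exists in the test project's urls.py.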
class TwoPlaceholderExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
char_3 = models.CharField(u'char_3', max_length=255)
char_4 = models.CharField(u'char_4', max_length=255)
placeholder_1 = PlaceholderField('placeholder_1', related_name='p1')
placeholder_2 = PlaceholderField('placeholder_2', related_name='p2')
class DynamicPlaceholderSlotExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1')
placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2')
@python_2_unicode_compatible
class CharPksExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
slug = models.SlugField(u'char_1', max_length=255, primary_key=True)
placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1')
def __str__(self):
return "%s - %s" % (self.char_1, self.pk)
@python_2_unicode_compatible
class MultilingualExample1(TranslatableModel):
translations = TranslatedFields(
char_1=models.CharField(u'char_1', max_length=255),
char_2=models.CharField(u'char_2', max_length=255),
)
placeholder_1 = PlaceholderField('placeholder_1')
def __str__(self):
return self.char_1
def get_absolute_url(self):
return reverse("detail_multi", args=(self.pk,))
| [((22, 13, 22, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((23, 13, 23, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((24, 13, 24, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((25, 13, 25, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((26, 17, 26, 44), 'django.db.models.DateField', 'models.DateField', (), '', False, 'from django.db import models\n'), ((27, 18, 27, 49), 'cms.models.fields.PlaceholderField', 'PlaceholderField', ({(27, 35, 27, 48): '"""placeholder"""'}, {}), "('placeholder')", False, 'from cms.models.fields import PlaceholderField\n'), ((61, 13, 61, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((62, 13, 62, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((63, 13, 63, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((64, 13, 64, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((65, 20, 65, 72), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (), '', False, 'from cms.models.fields import PlaceholderField\n'), ((66, 20, 66, 72), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (), '', False, 'from cms.models.fields import PlaceholderField\n'), ((70, 13, 70, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((71, 13, 71, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((72, 20, 72, 88), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (), '', False, 'from cms.models.fields import PlaceholderField\n'), ((73, 20, 73, 88), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (), '', False, 'from cms.models.fields import PlaceholderField\n'), ((78, 13, 78, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((79, 11, 79, 72), 'django.db.models.SlugField', 'models.SlugField', (), '', False, 'from django.db import models\n'), ((80, 20, 80, 79), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (), '', False, 'from cms.models.fields import PlaceholderField\n'), ((92, 20, 92, 53), 'cms.models.fields.PlaceholderField', 'PlaceholderField', ({(92, 37, 92, 52): '"""placeholder_1"""'}, {}), "('placeholder_1')", False, 'from cms.models.fields import PlaceholderField\n'), ((41, 15, 41, 57), 'django.core.urlresolvers.reverse', 'reverse', (), '', False, 'from django.core.urlresolvers import reverse\n'), ((50, 19, 50, 53), 'cms.utils.get_language_from_request', 'get_language_from_request', ({(50, 45, 50, 52): 'request'}, {}), '(request)', False, 'from cms.utils import get_language_from_request\n'), ((56, 19, 56, 53), 'cms.utils.get_language_from_request', 'get_language_from_request', ({(56, 45, 56, 52): 'request'}, {}), '(request)', False, 'from cms.utils import get_language_from_request\n'), ((57, 15, 57, 92), 'cms.utils.urlutils.admin_reverse', 'admin_reverse', (), '', False, 'from cms.utils.urlutils import admin_reverse\n'), ((98, 15, 98, 55), 'django.core.urlresolvers.reverse', 'reverse', (), '', False, 'from django.core.urlresolvers import reverse\n'), ((52, 36, 52, 113), 'cms.utils.urlutils.admin_reverse', 
'admin_reverse', (), '', False, 'from cms.utils.urlutils import admin_reverse\n'), ((89, 15, 89, 58), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((90, 15, 90, 58), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n')] |
DGrifferty/Python | 150-Challenges/Challenges 80 - 87/Challenge 84.py | d725301664db2cbcfd5c4f5974745b4d81c8e28a | # 084
# Ask the user to type in their postcode. Display the first two
# letters in uppercase.
# very simple
print(input('Enter your postcode: ')[0:2].upper()) | [] |