import scrapy
import re
from locations.items import GeojsonPointItem
class MokaHouseSpider(scrapy.Spider):
name = 'moka_house'
allowed_domains = ['mokahouse.com']
start_urls = (
'http://mokahouse.com/locations/',
)
def parse(self, response):
links = response.xpath('//div[@class="entry-summary"]/p/a')
for link in links:
if link == links[-1]:
continue
name = re.search(r'(.*)(\s\(.*\))', link.xpath('text()').extract_first()).groups()[0]
yield scrapy.Request(
link.xpath('@href').extract_first(),
meta = {'name': name},
callback = self.parse_link
)
def parse_link(self, response):
ref = response.xpath('//div[@class="article-wrapper"]/article/@id').extract_first()
name = response.meta['name']
website = response.url
tmp_data = response.xpath('//div[@class="entry-summary"]/p')
p_2 = tmp_data[1].xpath('text()').extract()
p_3 = tmp_data[2].xpath('text()').extract()
p_4 = tmp_data[3].xpath('text()').extract()
p_5 = tmp_data[4].xpath('text()').extract()
l_address = p_2 + p_3
l_address = [x for x in l_address if x != '\n']
address_text = ''.join(l_address).replace('\n', ', ')
address_data = re.search(r'(.*)(,\s)(.*)(\sBC)(.*)', address_text).groups()
street = address_data[0].strip()
city = address_data[2].replace(',', '').strip()
state = address_data[3].strip()
postcode = address_data[4].strip()
tmp_data_extract = tmp_data.extract()
phone_text = tmp_data_extract[2] + tmp_data_extract[3]
phone_number = ''
match = re.search(r'<strong>P:\s(.*)<\/strong>', phone_text)
if match is not None:
phone_number = match.groups()[0]
l_hours = p_4 + p_5
l_hours = [x.replace('\n', '') for x in l_hours if x != '\n' and self.hasDigit(x)]
opening_hours = ';'.join(l_hours)
properties = {
'ref': ref,
'name': name,
'website': website,
'phone': phone_number,
'street': street,
'city': city,
'state': state,
'postcode': postcode,
'opening_hours': opening_hours
}
yield GeojsonPointItem(**properties)
    def hasDigit(self, s):
        # True if the string contains at least one digit (used to keep only the opening-hours lines).
        return bool(re.search(r'\d', s))
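A minimal standalone sketch (separate from the spider) of how the address regex in parse_link splits a joined address string; the sample address below is hypothetical.

import re

address_text = '345 Cook St, Victoria, BC V8V 3X7'
groups = re.search(r'(.*)(,\s)(.*)(\sBC)(.*)', address_text).groups()
street = groups[0].strip()                  # '345 Cook St'
city = groups[2].replace(',', '').strip()   # 'Victoria'
state = groups[3].strip()                   # 'BC'
postcode = groups[4].strip()                # 'V8V 3X7'
print(street, city, state, postcode)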
| {
"content_hash": "39f7ee047bf1576d99def6c76f880b24",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 97,
"avg_line_length": 35.394366197183096,
"alnum_prop": 0.5113410266613609,
"repo_name": "iandees/all-the-places",
"id": "ea909e056f01ab8cd768ad61c1724f98de7416ae",
"size": "2537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "locations/spiders/moka_house.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2134"
},
{
"name": "Python",
"bytes": "116132"
},
{
"name": "Shell",
"bytes": "4477"
}
],
"symlink_target": ""
} |
from pyramid.renderers import render_to_response
from groupdocs.ApiClient import ApiClient
from groupdocs.AsyncApi import AsyncApi
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
import time
# Checking value on null
def IsNotNull(value):
return value is not None and len(value) > 0
# Sample 18
def sample18(request):
clientId = request.POST.get('client_id')
privateKey = request.POST.get('private_key')
fileId = request.POST.get('fileId')
targetType = request.POST.get('convert_type')
    # Checking required parameters
    if not (IsNotNull(clientId) and IsNotNull(privateKey) and IsNotNull(fileId) and IsNotNull(targetType)):
        return render_to_response('__main__:templates/sample18.pt',
            { 'error' : 'You did not enter all of the required parameters' })
### Create Signer, ApiClient and AsyncApi objects
# Create signer object
signer = GroupDocsRequestSigner(privateKey)
# Create apiClient object
apiClient = ApiClient(signer)
    # Create AsyncApi object (named asyncApi to avoid the Python 3 reserved word "async")
    asyncApi = AsyncApi(apiClient)
    # Default the iframe so the template variable is always defined
    iframe = ''
    try:
        convert = asyncApi.Convert(clientId, fileId, targetType=targetType)
# check request status
if convert.status == "Ok":
            # Delay so that the conversion request has time to be processed
time.sleep(5)
            # Request the document info for the job id from the API
            jobs = asyncApi.GetJobDocuments(clientId, convert.result.job_id)
# Get file guid
guid = jobs.result.inputs[0].outputs[0].guid
# Generating iframe
iframe = '<iframe src="https://apps.groupdocs.com/document-viewer/embed/' + guid + '" frameborder="0" width="100%" height="600"></iframe>'
    except Exception as e:
return render_to_response('__main__:templates/sample18.pt',
{ 'error' : str(e) })
# Set variables for template
return render_to_response('__main__:templates/sample18.pt',
{
'userId' : clientId,
'privateKey' : privateKey,
'fileId' : fileId,
'targetType' : targetType,
'iframe' : iframe
},
request=request) | {
"content_hash": "b161c4971334f9fdb74cfd81b3031650",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 150,
"avg_line_length": 35.9344262295082,
"alnum_prop": 0.6459854014598541,
"repo_name": "liosha2007/temporary-groupdocs-python-sdk",
"id": "7f16a8e63de7395e9045b928e8c7eae1a9cf867d",
"size": "2322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/api-samples/inc_samples/sample18.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1070081"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_head_identity_provider.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ApiManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.identity_provider.get_entity_tag(
resource_group_name="rg1",
service_name="apimService1",
identity_provider_name="aadB2C",
)
print(response)
# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementHeadIdentityProvider.json
if __name__ == "__main__":
main()
| {
"content_hash": "74f9d52f7a27115e1118447260c006f3",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 156,
"avg_line_length": 33.85294117647059,
"alnum_prop": 0.73501303214596,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d0b20c6dee29f9e9e98b6a0607160541ed6ad7e9",
"size": "1619",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_head_identity_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
import os
import csv
import argparse
import re
from .utils import (
ExternalSystemHelper,
ModelFinder,
SupportedFileChecker,
CsvActionFactory)
from nsync.policies import (
BasicSyncPolicy,
OrderedSyncPolicy,
TransactionSyncPolicy
)
DEFAULT_FILE_REGEX = (r'(?P<external_system>[a-zA-Z0-9]+)_'
                      r'(?P<app_name>[a-zA-Z0-9]+)_'
                      r'(?P<model_name>[a-zA-Z0-9]+).*\.csv')
class Command(BaseCommand):
help = 'Sync info from a list of files'
def add_arguments(self, parser):
# Mandatory
parser.add_argument('files', type=argparse.FileType('r'), nargs='+')
# Optional
parser.add_argument(
'--file_name_regex',
type=str,
default=DEFAULT_FILE_REGEX,
help='The regular expression to obtain the system name, app name '
'and model name from each file')
parser.add_argument(
'--create_external_system',
type=bool,
default=True,
help='If true, the command will create a matching external '
'system object if one cannot be found')
parser.add_argument(
'--smart_ordering',
type=bool,
default=True,
            help='When this option is true, the command will perform all '
'Create actions, then Update actions, and finally Delete '
'actions. This ensures that if one file creates an object '
'but another deletes it, the order that the files are '
'provided to the command is not important. Default: True')
parser.add_argument(
'--as_transaction',
type=bool,
default=True,
            help='Wrap all of the actions in a DB transaction. Default: True')
def handle(self, *args, **options):
TestableCommand(**options).execute()
class TestableCommand:
def __init__(self, **options):
self.files = options['files']
self.pattern = re.compile(options['file_name_regex'])
self.create_external_system = options['create_external_system']
self.ordered = options['smart_ordering']
self.use_transaction = options['as_transaction']
def execute(self):
actions = self.collect_all_actions()
if self.ordered:
policy = OrderedSyncPolicy(actions)
else:
policy = BasicSyncPolicy(actions)
if self.use_transaction:
policy = TransactionSyncPolicy(policy)
policy.execute()
def collect_all_actions(self):
actions = []
for f in self.files:
if not SupportedFileChecker.is_valid(f):
raise CommandError('Unsupported file:{}'.format(f))
basename = os.path.basename(f.name)
(system, app, model) = TargetExtractor(self.pattern).extract(
basename)
external_system = ExternalSystemHelper.find(
system, self.create_external_system)
model = ModelFinder.find(app, model)
reader = csv.DictReader(f)
builder = CsvActionFactory(model, external_system)
for d in reader:
actions.extend(builder.from_dict(d))
return actions
class TargetExtractor:
def __init__(self, pattern):
self.pattern = pattern
def extract(self, filename):
result = self.pattern.match(filename)
return (result.group('external_system'),
result.group('app_name'),
result.group('model_name'))
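A minimal standalone sketch of the filename convention that DEFAULT_FILE_REGEX and TargetExtractor parse above; the example filename is hypothetical.

import re

DEFAULT_FILE_REGEX = (r'(?P<external_system>[a-zA-Z0-9]+)_'
                      r'(?P<app_name>[a-zA-Z0-9]+)_'
                      r'(?P<model_name>[a-zA-Z0-9]+).*\.csv')

match = re.compile(DEFAULT_FILE_REGEX).match('crm_contacts_person_20160101.csv')
print(match.group('external_system'),   # 'crm'
      match.group('app_name'),          # 'contacts'
      match.group('model_name'))        # 'person'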
| {
"content_hash": "734b7e608d3e2905377851d3cebaa7a0",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 33.42727272727273,
"alnum_prop": 0.5863475659505031,
"repo_name": "andrewdodd/django-nsync",
"id": "f20968fb38302ca94ffa14c38ff54c259b2d5d3d",
"size": "3677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nsync/management/commands/syncfiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "134137"
}
],
"symlink_target": ""
} |
import numpy as np
from coinrun import setup_utils, make
def random_agent(num_envs=1, max_steps=100000):
setup_utils.setup_and_load(use_cmd_line_args=False)
env = make('standard', num_envs=num_envs)
for step in range(max_steps):
acts = np.array([env.action_space.sample() for _ in range(env.num_envs)])
_obs, rews, _dones, _infos = env.step(acts)
print("step", step, "rews", rews)
env.close()
if __name__ == '__main__':
random_agent() | {
"content_hash": "46382b309c9af6b4100bbb3e06973e55",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 81,
"avg_line_length": 30.125,
"alnum_prop": 0.6286307053941909,
"repo_name": "openai/coinrun",
"id": "5fb93e0fb3662f5203da32b89a371bf2b522a5bd",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coinrun/random_agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "60988"
},
{
"name": "Dockerfile",
"bytes": "304"
},
{
"name": "Makefile",
"bytes": "2079"
},
{
"name": "Python",
"bytes": "56112"
}
],
"symlink_target": ""
} |
from dynaconf.vendor.box.box import Box
class SBox(Box):
"""
    ShorthandBox (SBox) allows for
    property access of `dict`, `json` and `yaml`
"""
_protected_keys = dir({}) + ['to_dict', 'to_json', 'to_yaml', 'json', 'yaml', 'from_yaml', 'from_json',
'dict', 'toml', 'from_toml', 'to_toml']
@property
def dict(self):
return self.to_dict()
@property
def json(self):
return self.to_json()
@property
def yaml(self):
return self.to_yaml()
@property
def toml(self):
return self.to_toml()
def __repr__(self):
return '<ShorthandBox: {0}>'.format(str(self.to_dict()))
def copy(self):
return SBox(super(SBox, self).copy())
def __copy__(self):
return SBox(super(SBox, self).copy())
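A minimal usage sketch of the shorthand properties defined above, assuming the vendored dynaconf.vendor.box package is importable in this environment; the data is hypothetical.

from dynaconf.vendor.box.shorthand_box import SBox

box = SBox({'host': 'localhost', 'port': 5432})
print(box.host)   # attribute-style access inherited from Box
print(box.dict)   # plain dict via the .dict shorthand property
print(box.json)   # JSON string via the .json shorthand property
print(box.yaml)   # YAML string via the .yaml shorthand property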
| {
"content_hash": "54d28ac86e28503424d84fed5d160285",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 107,
"avg_line_length": 23.62857142857143,
"alnum_prop": 0.5392986698911729,
"repo_name": "rochacbruno/dynaconf",
"id": "746f7619a164a8386938cfe28875fe2a9567e077",
"size": "874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynaconf/vendor_src/box/shorthand_box.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2867"
},
{
"name": "Makefile",
"bytes": "11505"
},
{
"name": "Python",
"bytes": "1438471"
},
{
"name": "Shell",
"bytes": "14740"
}
],
"symlink_target": ""
} |
import types
from collections import defaultdict
"""Query filters"""
def _DIRECTION(keys, direction):
if isinstance(keys, types.StringTypes):
return (keys, direction),
elif isinstance(keys, (types.ListType, types.TupleType)):
return tuple([(k, direction) for k in keys])
def ASCENDING(keys):
"""Ascending sort order"""
return _DIRECTION(keys, 1)
def DESCENDING(keys):
"""Descending sort order"""
return _DIRECTION(keys, -1)
def GEO2D(keys):
"""
Two-dimensional geospatial index
http://www.mongodb.org/display/DOCS/Geospatial+Indexing
"""
return _DIRECTION(keys, "2d")
def GEOHAYSTACK(keys):
"""
Bucket-based geospatial index
http://www.mongodb.org/display/DOCS/Geospatial+Haystack+Indexing
"""
return _DIRECTION(keys, "geoHaystack")
class _QueryFilter(defaultdict):
def __init__(self):
defaultdict.__init__(self, lambda: ())
def __add__(self, obj):
for k, v in obj.items():
if isinstance(v, types.TupleType):
self[k] += v
else:
self[k] = v
return self
def _index_document(self, operation, index_list):
name = self.__class__.__name__
try:
assert isinstance(index_list, (types.ListType, types.TupleType))
for key, direction in index_list:
if not isinstance(key, types.StringTypes):
raise TypeError("Invalid %sing key: %s" % (name, repr(key)))
if direction not in (1, -1, "2d", "geoHaystack"):
raise TypeError("Invalid %sing direction: %s" % (name, direction))
self[operation] += tuple(((key, direction),))
except Exception:
raise TypeError("Invalid list of keys for %s: %s" % (name, repr(index_list)))
def __repr__(self):
return "<mongodb QueryFilter: %s>" % dict.__repr__(self)
class sort(_QueryFilter):
"""Sorts the results of a query."""
def __init__(self, key_list):
_QueryFilter.__init__(self)
try:
assert isinstance(key_list[0], (types.ListType, types.TupleType))
except:
key_list = (key_list,)
self._index_document("orderby", key_list)
class hint(_QueryFilter):
"""Adds a `hint`, telling Mongo the proper index to use for the query."""
def __init__(self, index_list):
_QueryFilter.__init__(self)
try:
assert isinstance(index_list[0], (types.ListType, types.TupleType))
except:
index_list = (index_list,)
self._index_document("$hint", index_list)
class explain(_QueryFilter):
"""Returns an explain plan for the query."""
def __init__(self):
_QueryFilter.__init__(self)
self["explain"] = True
class snapshot(_QueryFilter):
def __init__(self):
_QueryFilter.__init__(self)
self["snapshot"] = True
| {
"content_hash": "98c290281132cffe2cd353861d813dc5",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 89,
"avg_line_length": 27.914285714285715,
"alnum_prop": 0.5820539065165472,
"repo_name": "claytondaley/mongo-async-python-driver",
"id": "bf094413fcae37f8ff4a74f6ca8802d1a9f96591",
"size": "3523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txmongo/filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "121919"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
} |
import os
import sys
import argparse
import pandas as pd
# Submit jobs to the cluster
from gscripts.qtools import Submitter
WALLTIME = '2:00:00'
'''
Author: olga
Date created: 7/12/13 9:38 AM
The purpose of this program is to write submitter scripts to perform MISO
analysis on a large number of files. This script assumes paired-end reads.
# **Note** for some reason,
Example runs:
TODO: deprecate the non-queue way of running
'''
# Class: CommandLine
class CommandLine(object):
def __init__(self, inOpts=None):
self.parser = argparse.ArgumentParser(
description='''Write a script to perform MISO analysis
on individual samples. By default, this is submitted to TSCC,
unless "--do-not-submit" is specified.
''',
add_help=True, prefix_chars='-')
samples = self.parser.add_mutually_exclusive_group(required=True)
samples.add_argument('--bam', type=str,
action='store', default='',
help='A single BAM file')
samples.add_argument('--sample-info', type=str, action='store',
help='A tab-delimited file with Bam files as '
'column 1 and the sample Ids as column 2 ('
'no header). If "--do-not-submit" is off, '
'this will submit an '
'array job to be a nice cluster user')
self.parser.add_argument('--sample-id', type=str,
action='store',
help='sample ID. required if using --bam',
required=False)
self.parser.add_argument('--genome', type=str, action='store',
required=True, help='Which genome to use')
self.parser.add_argument('--output-sh', type=str, required=False,
action='store',
help="The name of the .sh script created for "
"one-touch action. Required if using "
"'--bam' for a single sample."
"Not used with '--sample-info', "
"where each sample "
"gets its own sh file.")
self.parser.add_argument('--walltime',
action='store', default='1:00:00',
help='Walltime of each submitted job. '
'(default=1:00:00 aka 1 hour)')
        self.parser.add_argument('--nodes', action='store', default=1,
help='Number of nodes to request. '
'(default=1)')
self.parser.add_argument('--ppn', action='store', default=16,
help='Processors per node. (default=16)')
self.parser.add_argument('-l', '--read-length', required=False,
help='(optional) Read length of samples. If '
'not provided, the length of the first '
'read of the bam file is used.')
self.parser.add_argument('--do-not-submit',
action='store_true', default=False,
help='Whether or not to actually submit the '
'final file.')
if inOpts is None:
self.args = vars(self.parser.parse_args())
else:
self.args = vars(self.parser.parse_args(inOpts))
    def do_usage_and_die(self, msg):
        '''
        If a critical error is encountered, where it is suspected that the
        program is not being called with consistent parameters or data, this
        method will write out an error string (msg), then terminate execution
        of the program.
        '''
        import sys
        print >> sys.stderr, msg
        self.parser.print_usage()
return 2
# Class: Usage
class Usage(Exception):
'''
Used to signal a Usage error, evoking a usage statement and eventual
exit when raised
'''
def __init__(self, msg):
self.msg = msg
class MisoPipeline(object):
def __init__(self, bam, sample_info_file,
sample_id, output_sh,
genome, walltime, nodes=1, ppn=16,
submit=False, read_length=None):
"""
Parameters
----------
Returns
-------
Raises
------
"""
self.sample_info_file = sample_info_file
if self.sample_info_file is not None:
sample_info = pd.read_table(self.sample_info_file, header=None)
self.bams = sample_info[0]
self.sample_ids = sample_info[1]
self.sh_files = ['{}.miso.sh'.format(bam) for bam in self.bams]
self.multiple_samples = True
else:
self.sample_ids = [sample_id]
self.bams = [bam]
self.sh_files = [output_sh]
self.multiple_samples = False
self.genome = genome
self.walltime = walltime
self.submit = submit
self.nodes = nodes
self.ppn = ppn
        self.read_length = read_length
all_samples_commands = []
for bam, sample_id, sh_file in zip(self.bams, self.sample_ids,
self.sh_files):
self._write_single_sample(bam, sample_id, sh_file)
sh_command = 'bash {}'.format(sh_file)
if self.submit and not self.multiple_samples:
commands = [sh_command]
sub = Submitter(commands, job_name='miso',
sh_filename='{}.qsub.sh'.format(sh_file),
ppn=self.ppn, nodes=self.nodes,
walltime=self.walltime)
sub.job(submit=self.submit)
if self.multiple_samples:
all_samples_commands.append(sh_command)
if self.multiple_samples:
sub = Submitter(all_samples_commands, job_name='miso',
sh_filename='miso.qsub.sh',
array=True,
ppn=self.ppn, nodes=self.nodes,
walltime=self.walltime)
sub.job(submit=self.submit)
def _write_single_sample(self, bam, sample_id, sh_file):
commands = []
commands.append('#!/bin/bash')
commands.append('# Finding all MISO splicing scores for sample: {}. '
'Yay!\n'
.format(sample_id))
event_types = ['SE', 'MXE', 'AFE', 'ALE', 'A3SS', 'A5SS',
'RI', 'TANDEMUTR']
'''
Get the read length. Gonna keep this as bash because samtools and head are very fast.
WARNING - this only takes the read length of the first read, not the average read length.
This has caused problems in the past if the first read is shorter than the average, for some reason
        it seems like all reads longer than what is given as readlen get thrown out. Should be changed to get
the average or most abundant read length instead. (9/2/15)
'''
if self.read_length is None:
commands.append(
"READ_LEN=$(samtools view %s | head -n 1 | cut -f 10 | awk '{ "
"print length }')" % (bam))
else:
commands.append('READ_LEN={}'.format(self.read_length))
for event_type in event_types:
out_dir = '{}/miso/{}/{}'.format(os.path.dirname(os.path
.abspath(bam)),
sample_id, event_type)
psi_out = '{}/psi.out'.format(out_dir)
psi_err = '{}/psi.err'.format(out_dir)
commands.append('\n\n# calculate Psi scores for'
' all {} events'.format(event_type))
commands.append('mkdir -p {}'.format(out_dir))
commands.append("miso \
--run {genome_dir}/{genome}/miso/{event_type}_index \
{bam} --output-dir {out_dir} \
--read-len $READ_LEN \
 --settings-filename {genome_dir}/hg19/miso_annotations"
"/miso_settings_min_event_reads10.txt \
-p {ppn} \
> {psi_out} \
2> {psi_err}".format(genome=self.genome, event_type=event_type, bam=bam,
out_dir=out_dir, psi_out=psi_out, psi_err=psi_err,
genome_dir=os.environ['GENOME'], ppn=self.ppn))
commands.append("\n# Check that the psi calculation jobs didn't "
"fail.\n#'-z' "
"returns "
"true when a string is empty, so this is checking "
"that grepping these files for the words 'failed' "
"and 'shutdown' didn't find anything.")
commands.append('iffailed=$(grep failed {})'.format(psi_out))
commands.append('ifshutdown=$(grep shutdown {})'.format(psi_err))
commands.append(
"if [ ! -z \"$iffailed\" -o ! -z \"$ifshutdown\" ] ; "
"then\n\
#rm -rf {0}\n\
echo \"MISO psi failed on event type: {1}\"\n\
exit 1\n\
fi\n".format(out_dir, event_type))
commands.append('# Summarize psi scores for all {} events'
.format(event_type))
commands.append('run_miso.py '
'--summarize-samples {0} ' \
'{0} >{0}/summary.out 2>{0}/summary.err'.format(
out_dir))
commands.append("\n# Check that the summary jobs didn't fail")
commands.append("# '-s' returns true if file size is nonzero, "
"and the error file should be empty.")
commands.append("""if [ -s {0}/summary.err ] ; then
#rm -rf {0}\n
echo 'MISO psi failed on event type: {1}'
exit 1
fi
""".format(out_dir, event_type))
with open(sh_file, 'w') as f:
f.write('\n'.join(commands))
sys.stdout.write('Wrote miso script for sample "{}": {}\n'.format(
sample_id, sh_file))
# Function: main
def main():
'''
This function is invoked when the program is run from the command line,
i.e. as:
python submit_miso_pipeline.py
or as:
./submit_miso_pipeline.py
    If the user has executable permissions on the file (set by chmod ug+x
    program.py or by chmod 775 program.py. Just need the executable bit set to true)
'''
cl = CommandLine()
try:
submit = not cl.args['do_not_submit']
MisoPipeline(cl.args['bam'],
cl.args['sample_info'],
cl.args['sample_id'],
cl.args['output_sh'],
cl.args['genome'],
cl.args['walltime'],
cl.args['nodes'],
cl.args['ppn'],
read_length=cl.args['read_length'],
submit=submit)
# If not all the correct arguments are given, break the program and
# show the usage information
except Usage, err:
cl.do_usage_and_die(err.msg)
if __name__ == '__main__':
main()
| {
"content_hash": "73f66bf9694b3320efa92faeb972968e",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 112,
"avg_line_length": 39.436860068259385,
"alnum_prop": 0.5007356122890524,
"repo_name": "YeoLab/gscripts",
"id": "cbd851ede3db78b03e39856d43e3abb8ec8e89d0",
"size": "11624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gscripts/miso/submit_miso_pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6662"
},
{
"name": "Perl",
"bytes": "542826"
},
{
"name": "Python",
"bytes": "816916"
},
{
"name": "R",
"bytes": "370788"
},
{
"name": "Scala",
"bytes": "215735"
},
{
"name": "Shell",
"bytes": "12592"
}
],
"symlink_target": ""
} |
import numbers, array, itertools, operator, math
from collections import defaultdict
from math import fsum
from .misc import minmax2
from .string import join, format_char
default_number_format = '.3g'
class DistributionTable(object):
def count(self):
return NotImplemented
def __truediv__(self, divisor):
return NotImplemented
def __str__(self):
return self.__format__(default_number_format)
def normalize(self, count=None):
if count is None:
count = self.count()
return self / count
class SparseDistributionTable(DistributionTable, defaultdict):
""""Holds a probability distribution and can compute the distance to other dists"""
def __init__(self, type=int, *args):
"""
:param type: type
:param args: *
:return:
"""
assert issubclass(type, numbers.Real)
DistributionTable.__init__(self)
defaultdict.__init__(self, type, *args)
def distance_to(self, other):
return fsum((abs(p - other[bin]) for bin, p in self.items())) + \
fsum(p for bin, p in other.items() if bin not in self)
def count(self):
return sum(self.values())
def increase(self, item, value=1):
self[item] += value
def __truediv__(self, divisor):
"""
:param divisor: numbers.Real
:return: SparseDistributionTable
"""
divisor = float(divisor)
return SparseDistributionTable(float,
((k, v / divisor) for k, v in self.items()))
def __format__(self, number_format_spec=''):
return join('(',
', '.join((
'{}: {:{}}'.format(format_char(event), frequency, number_format_spec)
for event, frequency in self.items())),
')')
class UniformBinDistributionTable(DistributionTable):
def __init__(self, start, stop, bincount=None, datatype=None, initializer=None):
super().__init__()
assert stop > start
if bincount is None:
bincount = int(math.ceil(stop - start))
assert isinstance(bincount, numbers.Integral) and bincount >= 1
self.lower = start
self.upper = stop
self.step = (stop - start) / bincount
if self.__step.is_integer():
self.__step = int(self.__step)
if initializer is None:
initializer = itertools.repeat(0, bincount)
self.data = list(initializer) if datatype is None else array.array(datatype, initializer)
excess = len(self.data) - bincount
if excess > 0:
del self.data[bincount:]
elif excess < 0:
self.data.extend(itertools.repeat(0, -excess))
@property
def step(self):
return self.__step
@step.setter
def step(self, value):
self.__step = value
self.__invstep = 1. / value
@property
def invstep(self):
return self.__invstep
def datatype(self):
return self.data.typecode if isinstance(self.data, array.array) else None
def getbinlower(self, binidx):
return binidx * self.__step + self.lower
def getbinupper(self, binidx):
return self.getbinlower(binidx + 1)
def getbinlimits(self, binidx):
return self.getbinlower(binidx), self.getbinupper(binidx)
def getbinidx(self, key):
if key <= self.lower:
return 0
if key >= self.upper:
return len(self.data) - 1
else:
return self.__getbinidx_raw(key)
def __getbinidx_raw(self, key):
return int((key - self.lower) * self.__invstep)
def __getitem__(self, key):
return self.data[self.getbinidx(key)]
def __setitem__(self, key, value):
self.data[self.getbinidx(key)] = value
def increase(self, key, value=1):
self.data[self.getbinidx(key)] += value
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def count(self):
return sum(self)
def __truediv__(self, divisor):
return UniformBinDistributionTable(self.lower, self.upper, len(self.data), 'd',
map(float(divisor).__rtruediv__, self.data))
def distance_to(self, other):
"""
:param other: UniformBinDistributionTable
:return: float
"""
if isinstance(other, UniformBinDistributionTable):
if self.lower == other.lower and self.upper == other.upper and self.__step == other.__step:
other = other.data
else:
return self.__distance_to2(other)
assert not hasattr(other, '__len__') or len(self.data) == len(other)
return fsum(map(abs, map(operator.sub, self.data, other)))
def __distance_to2(self, other):
return (
UniformBinDistributionTable.__distance_to2_lower(
*minmax2(self, other, 'lower')) +
UniformBinDistributionTable.__distance_to2_upper(
*minmax2(self, other, 'upper', True)) +
fsum(UniformBinDistributionTable.__distance_to2_middle_parts(
*minmax2(self, other, 'step'))))
def __distance_to2_middle_parts(self, other):
assert self.__step <= other.__step
assert self.lower < self.upper and other.lower < other.upper
lower = max(self.lower, other.lower)
self_binidx = self.__getbinidx_raw(lower)
self_binlimits_next = self.getbinlimits(self_binidx)
other_binidx = other.__getbinidx_raw(lower)
other_binlimits = other.getbinlimits(other_binidx)
while self_binidx < len(self.data) and other_binidx < len(other.data):
self_binlimits = self_binlimits_next
yield abs((self.data[self_binidx] * self.__invstep) - (other.data[other_binidx] * other.__invstep)) * \
(min(self_binlimits[1], other_binlimits[1]) - max(self_binlimits[0], other_binlimits[0]))
if self_binlimits[1] <= other_binlimits[1]:
self_binidx += 1
self_binlimits_next = self.getbinlimits(self_binidx)
if self_binlimits[1] >= other_binlimits[1]:
other_binidx += 1
other_binlimits = other.getbinlimits(other_binidx)
def __distance_to2_lower(self, other):
"""
:param other: UniformBinDistributionTable
:return: float
"""
assert self.lower <= other.lower
lower_bin_end = (
self.__getbinidx_raw(other.lower)
if self.upper > other.lower else
len(self.data))
lower = fsum(itertools.islice(self.data, 0, lower_bin_end))
if lower_bin_end < len(self.data):
lower += self.data[lower_bin_end] * self.__invstep * \
(other.lower - self.getbinlower(lower_bin_end))
return lower
def __distance_to2_upper(self, other):
"""
:param other: UniformBinDistributionTable
:return: float
"""
assert self.upper >= other.upper
upper_bin_start = (
self.__getbinidx_raw(other.upper)
if self.lower < other.upper else
0)
upper = fsum(itertools.islice(self.data, upper_bin_start + 1, len(self.data)))
if upper_bin_start < len(self.data):
upper += self.data[upper_bin_start] * self.__invstep * \
(self.getbinupper(upper_bin_start) - other.upper)
return upper
def __format__(self, number_format_spec=''):
return join('[',
', '.join((
'[{2:{0}}-{3:{0}}): {1:{0}}'.format(
number_format_spec, frequency, *self.getbinlimits(bin_idx))
for bin_idx, frequency in enumerate(self))),
']')
@staticmethod
def for_count(count, lower, upper, *args):
""" uses Sturge's rule: ceil(1 + log2(count)) """
return UniformBinDistributionTable(
lower, upper, _sturges_rule(count), *args)
@staticmethod
def for_variance(count, lower, upper, variance, *args):
""" uses Scott's rule, limited by the double of Sturge's """
h = int(math.ceil(3.49 * math.sqrt(variance) / _cubicroot(count)))
return UniformBinDistributionTable(
lower, upper, min(h, 2 * _sturges_rule(count)), *args)
@staticmethod
def for_quartiles(count, lower, upper, q1, q3, *args):
""" uses Freedman's and Diaconis' rule """
h = int(math.ceil(2.0 * (q3 - q1) / _cubicroot(count)))
return UniformBinDistributionTable(
lower, upper, min(h, 2 * _sturges_rule(count)), *args)
def _sturges_rule(n):
assert isinstance(n, numbers.Integral) and n > 0
return (n - 1).bit_length() + 1
def _cubicroot(x):
return x ** (1. / 3.)
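A short worked check, independent of the module above, that the bit-length trick in _sturges_rule agrees with the stated formula ceil(1 + log2(count)):

import math

for n in (10, 1000, 4096):
    via_bits = (n - 1).bit_length() + 1          # what _sturges_rule computes
    via_log = int(math.ceil(1 + math.log2(n)))   # the formula from the docstring
    print(n, via_bits, via_log)                  # 10 -> 5, 5; 1000 -> 11, 11; 4096 -> 13, 13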
| {
"content_hash": "5b459668b56e59850beeb70132e6af0b",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 106,
"avg_line_length": 25.283333333333335,
"alnum_prop": 0.6729070533948582,
"repo_name": "davidfoerster/schema-matching",
"id": "ab7f056a09a05088a8e397211e8ad75929b9a890",
"size": "7585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/schema_matching/utilities/distribution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "719"
},
{
"name": "Python",
"bytes": "74814"
}
],
"symlink_target": ""
} |
from weibocommon import weibocommon
from factory.supports import htmlstripper
import rootcfg
import re
import time
from factory.supports.parser import parser
from factory.supports.logger import logger
from rootcfg import FOLLOW_URL_PREFIX
from rootcfg import FOLLOW_URL_POSTFIX
from rootcfg import HIS_FOLLOW_PREFIX
from rootcfg import COMMON_URL_PREFIX
from rootcfg import HIS_FOLLOW_POSTFIX
import sys
if sys.getdefaultencoding() != "utf-8":
reload(sys)
sys.setdefaultencoding('utf-8')
class weibofollowers(weibocommon):
def __init__(self,openers,task_dispatcher, result_handler=None):
super(weibofollowers,self).__init__(openers,task_dispatcher,result_handler)
self.index = 0
self.people = 0
self.handler = result_handler
self.baduids = {}
def __get_follow_url(self,uid):
fl_url = FOLLOW_URL_PREFIX+uid+FOLLOW_URL_POSTFIX
return fl_url
def do_run(self,task):
#/1749127163/follow?page=
uid = task
url = self.__get_follow_url(uid)
print url
ps = self.get_target(url,HIS_FOLLOW_PREFIX)
if ps == -1:
if uid in self.baduids:
self.baduids[uid] += 1
if self.baduids[uid] > 5:
logger.logger_error("uid: %s cannot be parsed"%uid)
if self.__check_baduid() == -1:
return
else:
self.baduids[uid] = 1
if self.baduids[uid] < 6:
#self.uids.insert(0,uid)
#FIXme, add the failed uid to memdb again, marked as failed
pass
else:
pagenum = self.__get_page_num(ps)
self.get_uids(ps)
            self.parse_pages(uid, pagenum)  # start from page 2
def __check_baduid(self):
#if all the uids in self.uids are in baduid, return -1, some are in return 0, none in return 1
uidnum = 2
baduidnum = len(self.baduids)
if baduidnum == uidnum:
return -1
if baduidnum > 0:
return 0
if baduidnum == 0:
return 1
def parse_pages(self,uid,pagenum):
        # I don't think it's a good idea to use multiple threads here, so just leave it in the current process
#page 1 should be parse directly
self.people = self.people+1
totalnum = pagenum
if pagenum > 10:
pagenum = 10 #weibo limitation
if pagenum == -1:
return -1
for i in range(2,pagenum+1):
url = COMMON_URL_PREFIX+uid+HIS_FOLLOW_POSTFIX+str(i)
ps = self.get_target(url,HIS_FOLLOW_PREFIX)
self.get_uids(ps)
print "+++++++apture: " + uid+" page: "+str(i) + " total: "+str(totalnum) + " people: " + str(self.people)
def __get_page_num(self,ps):
if (ps==None) or (ps==-1):
return -1
pages = ps.find("div",attrs={"class":"W_pages W_pages_comment","node-type":"pageList"})
al = pages.find_all("a",attrs={"class":"page S_bg1"})
if ((al==None) or (al=="") or (al==" ")):
return 1
pagenum = 0
for a1 in al:
if int(a1.string) > pagenum:
pagenum = int(a1.string)
return pagenum
def get_uids(self,ps):
userbox = ps.find("ul",attrs={"class":"cnfList","node-type":"userListBox"})
#usrlis = userbox.find_all("li",attrs={"class":"clearfix S_line1","action":"itemClick"})
#to be more precise
usrlis = ps.find_all("div",attrs={"class":"con_left"})
retlist = []
for user in usrlis:
retv = {}
a1 = user.find("a",attrs={"class":"W_f14 S_func1"})
userid = a1['usercard'][3:]
userhref = a1['href']
usernick = htmlstripper.strip_tags(str(a1)).decode('utf-8')
#a2 = user.find("i",attrs={"class":re.compile(ur"W_ico16 approve")}) #fix to use regex here
#approve
#approve_co
#regex does not work???
usertype = ""
a2 = user.find("i",attrs={"class":"W_ico16 approve"})
if not a2:
a2 = user.find("i",attrs={"class":"W_ico16 approve_co"})
if a2:
usertype = a2['title']
a3 = user.find("i",attrs={"class":"W_ico16 member"})
ismember = 0
if a3:
ismember = 1
span1 = user.find("span",attrs={"class":"addr"})
useraddr = htmlstripper.strip_tags(str(span1)).decode('utf-8')
#
fl_href = "/"+userid+"/follow"
fs_href = "/"+userid+"/fans"
#wb_href = userhref
connect1 = user.find("div",attrs={"class":"connect"})
a4 = connect1.find("a",attrs={"href":fl_href})
fl_num = a4.string
a5 = connect1.find("a",attrs={"href":fs_href})
fs_num = a5.string
a6 = connect1.find("a",attrs={"href":userhref})
wb_num = a6.string
info = user.find("div",attrs={"class":"info"})
infotxt = ""
if info:
infotxt = info.string
print "need photo"
print "id: "+userid + ", nick: "+usernick+", href: "+userhref
print "follower num: "+fl_num + ", fans num: "+fs_num+", weibo num: "+wb_num
print "user addr: "+useraddr+" usertype: "+usertype
print "info: "+infotxt
retv['uid'] = userid
retv['nick'] = usernick
retv['href'] = userhref
retv['follower_num'] = fl_num
retv['fans_num'] = fs_num
retv['miniblog_num'] = wb_num
retv['address'] = useraddr
retv['usertype'] = usertype
retv['info'] = infotxt
if self.handler != None:
self.handler([retv])
self.index = self.index+1
print "----------------------------------"+str(self.index)
| {
"content_hash": "11f71f43e8a7c9fc3bd28cfe4d6344ce",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 109,
"avg_line_length": 29.49404761904762,
"alnum_prop": 0.6359233097880929,
"repo_name": "jwang-share/theysay",
"id": "416d48b50a4b165ea707c0bcd6978b483a06932b",
"size": "4970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "black3/factory/workers/weibo/weibofollowers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CoffeeScript",
"bytes": "6594"
},
{
"name": "JavaScript",
"bytes": "24"
},
{
"name": "Python",
"bytes": "178214"
},
{
"name": "Ruby",
"bytes": "1486"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
import json
import os.path
class APSTrainingScore:
def __init__(self):
self.filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'training_score.json')
with open(self.filename) as json_data:
self.score_dict = json.load(json_data)
def get_score(self, position, size, distribution, trigger, probability, dangerlevel):
pos = self.score_dict['AvalancheProblemId'][str(int(position))]
s = self.score_dict['DestructiveSizeExtId'][str(int(size))]
d = self.score_dict['AvalPropagationId'][str(int(distribution))]
t = self.score_dict['AvalTriggerSimpleId'][str(int(trigger))]
p = self.score_dict['AvalProbabilityId'][str(int(probability))]
dl = self.score_dict['DangerLevel'][str(int(dangerlevel))]
self.score = pos + (s * d) + (t * p) + dl
def load_config():
filename = 'training_score.json'
with open(filename) as json_data:
score = json.load(json_data)
distribution = np.array(list(score['AvalPropagationId'].values())) # why is it called "propagation"?
size = np.array(list(score['DestructiveSizeExtId'].values()))
trigger = np.array(list(score['AvalTriggerSimpleId'].values()))
probability = np.array(list(score['AvalProbabilityId'].values()))
position = np.array(list(score['AvalancheProblemId'].values()))
dangerlevel = np.array(list(score['DangerLevel'].values()))
def get_score(position, size, distribution, trigger, probability, dangerlevel):
return position + (size * distribution) + (trigger * probability) + dangerlevel
def get_score_range(position, size, distribution, trigger, probability, dangerlevel):
score_range = []
for d in distribution:
for s in size:
for t in trigger:
for p in probability:
for pos in position:
for dl in dangerlevel:
score_range.append(get_score(pos, s, d, t, p, dl))
return np.array(score_range)
if __name__ == "__main__":
position, size, distribution, trigger, probability, dangerlevel = load_config()
ts = get_score_range(position, size, distribution, trigger, probability, dangerlevel)
print(ts.max())
plt.plot(ts)
plt.show()
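A hypothetical worked example of the score formula implemented above (score = position + size * distribution + trigger * probability + dangerlevel); the component values below are made up, not taken from training_score.json.

position, size, distribution, trigger, probability, dangerlevel = 2, 3, 2, 4, 3, 5
score = position + (size * distribution) + (trigger * probability) + dangerlevel
print(score)   # 2 + (3 * 2) + (4 * 3) + 5 = 25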
| {
"content_hash": "fbafb4184ffbe91cd396e8bf2232fe38",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 104,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.6464254952627045,
"repo_name": "kmunve/APS",
"id": "a6351638300dda20e738a0bf2a992aefd3ee7389",
"size": "2322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aps/config/training_score.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "73"
},
{
"name": "HTML",
"bytes": "54298"
},
{
"name": "JavaScript",
"bytes": "54483"
},
{
"name": "Jupyter Notebook",
"bytes": "10937958"
},
{
"name": "Python",
"bytes": "169992"
}
],
"symlink_target": ""
} |
import json
from typing import Any
from cryptography.fernet import InvalidToken as InvalidFernetToken
from sqlalchemy import Boolean, Column, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
class Variable(Base, LoggingMixin):
__tablename__ = "variable"
__NO_DEFAULT_SENTINEL = object()
id = Column(Integer, primary_key=True)
key = Column(String(ID_LEN), unique=True)
_val = Column('val', Text)
is_encrypted = Column(Boolean, unique=False, default=False)
def __repr__(self):
# Hiding the value
return '{} : {}'.format(self.key, self._val)
def get_val(self):
log = LoggingMixin().log
if self._val and self.is_encrypted:
try:
fernet = get_fernet()
return fernet.decrypt(bytes(self._val, 'utf-8')).decode()
except InvalidFernetToken:
log.error("Can't decrypt _val for key=%s, invalid token or value", self.key)
return None
except Exception:
log.error("Can't decrypt _val for key=%s, FERNET_KEY configuration missing", self.key)
return None
else:
return self._val
def set_val(self, value):
if value:
fernet = get_fernet()
self._val = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_encrypted = fernet.is_encrypted
@declared_attr
def val(cls):
return synonym('_val',
descriptor=property(cls.get_val, cls.set_val))
@classmethod
def setdefault(cls, key, default, deserialize_json=False):
"""
Like a Python builtin dict object, setdefault returns the current value
for a key, and if it isn't there, stores the default value and returns it.
:param key: Dict key for this Variable
:type key: str
:param default: Default value to set and return if the variable
isn't already in the DB
:type default: Mixed
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:return: Mixed
"""
obj = Variable.get(key, default_var=None,
deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj
@classmethod
@provide_session
def get(
cls,
key: str,
default_var: Any = __NO_DEFAULT_SENTINEL,
deserialize_json: bool = False,
session=None
):
obj = session.query(cls).filter(cls.key == key).first()
if obj is None:
if default_var is not cls.__NO_DEFAULT_SENTINEL:
return default_var
else:
raise KeyError('Variable {} does not exist'.format(key))
else:
if deserialize_json:
return json.loads(obj.val)
else:
return obj.val
@classmethod
@provide_session
def set(
cls,
key: str,
value: Any,
serialize_json: bool = False,
session=None
):
if serialize_json:
stored_value = json.dumps(value, indent=2)
else:
stored_value = str(value)
Variable.delete(key)
session.add(Variable(key=key, val=stored_value)) # type: ignore
session.flush()
@classmethod
@provide_session
def delete(cls, key, session=None):
session.query(cls).filter(cls.key == key).delete()
def rotate_fernet_key(self):
fernet = get_fernet()
if self._val and self.is_encrypted:
self._val = fernet.rotate(self._val.encode('utf-8')).decode()
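A minimal usage sketch of the Variable model above, assuming a configured Airflow metadata database; the key and values are hypothetical.

from airflow.models import Variable

Variable.set('feature_flags', {'use_new_parser': True}, serialize_json=True)
flags = Variable.get('feature_flags', deserialize_json=True)      # -> {'use_new_parser': True}
fallback = Variable.get('missing_key', default_var='fallback')    # no KeyError when a default is given
print(flags, fallback)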
| {
"content_hash": "709ab2675033c7e5039d84a69b93009c",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 102,
"avg_line_length": 32.6015625,
"alnum_prop": 0.5871075964533908,
"repo_name": "Fokko/incubator-airflow",
"id": "085113ed472a7dda4de9752a3c5c1a2aae4b17b3",
"size": "4985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/models/variable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
from .factory import get_api_sync_engine, get_consumer_sync_engine
from .models import APIReference, ConsumerReference, PluginConfigurationReference
def publish_api(client, obj):
obj = get_api_sync_engine().publish(client, obj)
if not obj.enabled:
obj.enabled = True
APIReference.objects.filter(id=obj.id).update(enabled=obj.enabled)
return obj
def withdraw_api(client, obj):
obj = get_api_sync_engine().withdraw(client, obj)
if obj.enabled:
obj.enabled = False
APIReference.objects.filter(id=obj.id).update(enabled=obj.enabled)
return obj
def synchronize_api(client, obj, toggle=False):
if (toggle and obj.enabled) or (not toggle and not obj.enabled):
return withdraw_api(client, obj)
return publish_api(client, obj)
def synchronize_apis(client, queryset=None):
return get_api_sync_engine().synchronize(client, queryset=queryset, delete=True)
def publish_plugin_configuration(client, obj):
return get_api_sync_engine().plugins().publish(client, obj)
def withdraw_plugin_configuration(client, obj):
return get_api_sync_engine().plugins().withdraw(client, obj)
def enable_plugin_configuration(client, obj, enabled=True):
obj.enabled = enabled
obj = get_api_sync_engine().plugins().publish(client, obj)
# Updated enabled state without triggering another save
PluginConfigurationReference.objects.filter(id=obj.id).update(enabled=obj.enabled)
return obj
def synchronize_plugin_configuration(client, obj, toggle=False):
enabled = not obj.enabled if toggle else obj.enabled
return enable_plugin_configuration(client, obj, enabled=enabled)
def synchronize_plugin_configurations(client, queryset=None):
return get_api_sync_engine().plugins().synchronize(client, queryset=queryset, delete=True)
def publish_consumer(client, obj):
obj = get_consumer_sync_engine().publish(client, obj)
if not obj.enabled:
obj.enabled = True
        ConsumerReference.objects.filter(id=obj.id).update(enabled=obj.enabled)
    return obj
def withdraw_consumer(client, obj):
obj = get_consumer_sync_engine().withdraw(client, obj)
if obj.enabled:
obj.enabled = False
        ConsumerReference.objects.filter(id=obj.id).update(enabled=obj.enabled)
    return obj
def synchronize_consumer(client, obj, toggle=False):
if (toggle and obj.enabled) or (not toggle and not obj.enabled):
return withdraw_consumer(client, obj)
return publish_consumer(client, obj)
def synchronize_consumers(client, queryset=None):
return get_consumer_sync_engine().synchronize(client, queryset=queryset, delete=True)
| {
"content_hash": "6ee3bd67598a64a8febc4aa045efecf3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 94,
"avg_line_length": 33.135802469135804,
"alnum_prop": 0.7291356184798807,
"repo_name": "vikingco/django-kong-admin",
"id": "ba65493335d9b5895289cba822209cf8ad972aec",
"size": "2708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kong_admin/logic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2185"
},
{
"name": "Makefile",
"bytes": "1246"
},
{
"name": "Python",
"bytes": "103172"
}
],
"symlink_target": ""
} |
"""
Tools for converting PyPI packages to conda recipes.
"""
from __future__ import absolute_import, division, print_function
import requests
import keyword
import os
import re
import subprocess
import sys
from collections import defaultdict
from os import makedirs, listdir, getcwd, chdir
from os.path import join, isdir, exists, isfile
from tempfile import mkdtemp
from shutil import copy2
if sys.version_info < (3,):
from xmlrpclib import ServerProxy, Transport, ProtocolError
from urllib2 import build_opener, ProxyHandler, Request, HTTPError
else:
from xmlrpc.client import ServerProxy, Transport, ProtocolError
from urllib.request import build_opener, ProxyHandler, Request
from urllib.error import HTTPError
from conda.fetch import (download, handle_proxy_407)
from conda.connection import CondaSession
from conda.utils import human_bytes, hashsum_file
from conda.install import rm_rf
from conda.compat import input, configparser, StringIO, string_types, PY3
from conda.config import get_proxy_servers
from conda.cli.common import spec_from_line
from conda_build.utils import tar_xf, unzip
from conda_build.source import SRC_CACHE, apply_patch
from conda_build.build import create_env
from conda_build.config import config
PYPI_META = """\
package:
name: {packagename}
version: !!str {version}
source:
fn: {filename}
url: {pypiurl}
{usemd5}md5: {md5}
# patches:
# List any patch files here
# - fix.patch
{build_comment}build:
{egg_comment}preserve_egg_dir: True
{entry_comment}entry_points:
# Put any entry points (scripts to be generated automatically) here. The
# syntax is module:function. For example
#
# - {packagename} = {packagename}:main
#
# Would create an entry point called {packagename} that calls {packagename}.main()
{entry_points}
# If this is a new build for the same version, increment the build
# number. If you do not include this key, it defaults to 0.
# number: 1
requirements:
build:
- python{build_depends}
run:
- python{run_depends}
test:
# Python imports
{import_comment}imports:{import_tests}
{entry_comment}commands:
# You can put test commands to be run here. Use this to test that the
# entry points work.
{test_commands}
# You can also put a file called run_test.py in the recipe that will be run
# at test time.
# requires:
# Put any additional test requirements here. For example
# - nose
about:
home: {homeurl}
license: {license}
summary: {summary}
# See
# http://docs.continuum.io/conda/build.html for
# more information about meta.yaml
"""
PYPI_BUILD_SH = """\
#!/bin/bash
$PYTHON setup.py install
# Add more build steps here, if they are necessary.
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build process.
"""
PYPI_BLD_BAT = """\
"%PYTHON%" setup.py install
if errorlevel 1 exit 1
:: Add more build steps here, if they are necessary.
:: See
:: http://docs.continuum.io/conda/build.html
:: for a list of environment variables that are set during the build process.
"""
# Note the {} formatting bits here
DISTUTILS_PATCH = '''\
diff core.py core.py
--- core.py
+++ core.py
@@ -166,5 +167,33 @@ def setup (**attrs):
\n
+# ====== BEGIN CONDA SKELETON PYPI PATCH ======
+
+import distutils.core
+import io
+import os.path
+import sys
+import yaml
+from yaml import Loader, SafeLoader
+
+# Override the default string handling function to always return unicode
+# objects (taken from StackOverflow)
+def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
+SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
+
+def setup(*args, **kwargs):
+ data = {{}}
+ data['install_requires'] = kwargs.get('install_requires', [])
+ data['extras_require'] = kwargs.get('extras_require', {{}})
+ data['entry_points'] = kwargs.get('entry_points', [])
+ data['packages'] = kwargs.get('packages', [])
+ data['setuptools'] = 'setuptools' in sys.modules
+ with io.open(os.path.join("{}", "pkginfo.yaml"), 'w', encoding='utf-8') as fn:
+ fn.write(yaml.dump(data, encoding=None))
+
+
+# ======= END CONDA SKELETON PYPI PATCH ======
\n
def run_setup (script_name, script_args=None, stop_after="run"):
"""Run a setup script in a somewhat controlled environment, and
'''
# https://gist.github.com/chrisguitarguy/2354951
class RequestsTransport(Transport):
"""
Drop in Transport for xmlrpclib that uses Requests instead of httplib
"""
# change our user agent to reflect Requests
user_agent = "Python XMLRPC with Requests (python-requests.org)"
# override this if you'd like to https
use_https = True
session = CondaSession()
def request(self, host, handler, request_body, verbose):
"""
Make an xmlrpc request.
"""
headers = {
'User-Agent': self.user_agent,
'Content-Type': 'text/xml',
}
url = self._build_url(host, handler)
try:
resp = self.session.post(url, data=request_body, headers=headers, proxies=self.session.proxies)
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 407: # Proxy Authentication Required
handle_proxy_407(url, self.session)
# Try again
return self.request(host, handler, request_body, verbose)
else:
raise
except requests.exceptions.ConnectionError as e:
# requests isn't so nice here. For whatever reason, https gives this
# error and http gives the above error. Also, there is no status_code
# attribute here. We have to just check if it looks like 407. See
# https://github.com/kennethreitz/requests/issues/2061.
if "407" in str(e): # Proxy Authentication Required
handle_proxy_407(url, self.session)
# Try again
return self.request(host, handler, request_body, verbose)
else:
raise
except requests.RequestException as e:
raise ProtocolError(url, resp.status_code, str(e), resp.headers)
else:
return self.parse_response(resp)
def parse_response(self, resp):
"""
Parse the xmlrpc response.
"""
p, u = self.getparser()
p.feed(resp.text)
p.close()
return u.close()
def _build_url(self, host, handler):
"""
Build a url for our request based on the host, handler and use_http
property
"""
scheme = 'https' if self.use_https else 'http'
return '%s://%s/%s' % (scheme, host, handler)
def main(args, parser):
if len(args.packages) > 1 and args.download:
# Because if a package's setup.py imports setuptools, it will make all
# future packages look like they depend on distribute. Also, who knows
# what kind of monkeypatching the setup.pys out there could be doing.
print("WARNING: building more than one recipe at once without "
"--no-download is not recommended")
proxies = get_proxy_servers()
if proxies:
transport = RequestsTransport()
else:
transport = None
client = ServerProxy(args.pypi_url, transport=transport)
package_dicts = {}
[output_dir] = args.output_dir
indent = '\n - '
all_packages = client.list_packages()
all_packages_lower = [i.lower() for i in all_packages]
while args.packages:
package = args.packages.pop()
# Look for package[extra,...] features spec:
match_extras = re.match(r'^([^[]+)\[([^]]+)\]$', package)
if match_extras:
package, extras = match_extras.groups()
extras = extras.split(',')
else:
extras = []
dir_path = join(output_dir, package.lower())
if exists(dir_path):
raise RuntimeError("directory already exists: %s" % dir_path)
d = package_dicts.setdefault(package, {'packagename': package.lower(),
'run_depends': '',
'build_depends': '',
'entry_points': '',
'build_comment': '# ',
'test_commands': '',
'usemd5': '',
'entry_comment': '#',
'egg_comment': '#'})
d['import_tests'] = valid(package).lower()
if d['import_tests'] == '':
d['import_comment'] = '# '
else:
d['import_comment'] = ''
d['import_tests'] = indent + d['import_tests']
if args.version:
[version] = args.version
versions = client.package_releases(package, True)
if version not in versions:
sys.exit("Error: Version %s of %s is not available on PyPI."
% (version, package))
d['version'] = version
else:
versions = client.package_releases(package)
if not versions:
# The xmlrpc interface is case sensitive, but the index itself
# is apparently not (the last time I checked,
# len(set(all_packages_lower)) == len(set(all_packages)))
if package.lower() in all_packages_lower:
print("%s not found, trying %s" % (package, package.capitalize()))
args.packages.append(all_packages[all_packages_lower.index(package.lower())])
del package_dicts[package]
continue
sys.exit("Error: Could not find any versions of package %s" %
package)
if len(versions) > 1:
print("Warning, the following versions were found for %s" %
package)
for ver in versions:
print(ver)
print("Using %s" % versions[0])
print("Use --version to specify a different version.")
d['version'] = versions[0]
data = client.release_data(package, d['version'])
urls = client.release_urls(package, d['version'])
if not args.all_urls:
# Try to find source urls
urls = [url for url in urls if url['python_version'] == 'source']
if not urls:
if 'download_url' in data:
urls = [defaultdict(str, {'url': data['download_url']})]
urls[0]['filename'] = urls[0]['url'].split('/')[-1]
d['usemd5'] = '#'
else:
sys.exit("Error: No source urls found for %s" % package)
if len(urls) > 1 and not args.noprompt:
print("More than one source version is available for %s:" %
package)
for i, url in enumerate(urls):
print("%d: %s (%s) %s" % (i, url['url'],
human_bytes(url['size']),
url['comment_text']))
n = int(input("Which version should I use? "))
else:
n = 0
print("Using url %s (%s) for %s." % (urls[n]['url'],
human_bytes(urls[n]['size'] or 0),
package))
d['pypiurl'] = urls[n]['url']
d['md5'] = urls[n]['md5_digest']
d['filename'] = urls[n]['filename']
d['homeurl'] = data['home_page']
d['summary'] = repr(data['summary'])
license_classifier = "License :: OSI Approved ::"
if 'classifiers' in data:
licenses = [classifier.split(license_classifier, 1)[1] for classifier in
data['classifiers'] if classifier.startswith(license_classifier)]
else:
licenses = []
if not licenses:
if data['license']:
if args.noprompt:
license = data['license']
elif '\n' not in data['license']:
print('Using "%s" for the license' % data['license'])
license = data['license']
else:
# Some projects put the whole license text in this field
print("This is the license for %s" % package)
print()
print(data['license'])
print()
license = input("What license string should I use? ")
else:
if args.noprompt:
license = "UNKNOWN"
else:
license = input(("No license could be found for %s on " +
"PyPI. What license should I use? ") %
package)
else:
license = ' or '.join(licenses)
d['license'] = license
# Unfortunately, two important pieces of metadata are only stored in
# the package itself: the dependencies, and the entry points (if the
# package uses distribute). Our strategy is to download the package
# and "fake" distribute/setuptools's setup() function to get this
# information from setup.py. If this sounds evil, keep in mind that
# distribute itself already works by monkeypatching distutils.
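        # (A rough sketch of the idea, not the actual DISTUTILS_PATCH applied in
        # run_setuppy() below:
        #     import distutils.core
        #     def fake_setup(**kwargs): recorded.update(kwargs)  # hypothetical recorder
        #     distutils.core.setup = fake_setup
        # so that running setup.py records its keyword arguments instead of
        # installing anything.)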
if args.download:
import yaml
print("Downloading %s (use --no-download to skip this step)" %
package)
tempdir = mkdtemp('conda_skeleton_' + package)
if not isdir(SRC_CACHE):
makedirs(SRC_CACHE)
try:
# Download it to the build source cache. That way, you have
# it.
download_path = join(SRC_CACHE, d['filename'])
if not isfile(download_path) or hashsum_file(download_path,
'md5') != d['md5']:
download(d['pypiurl'], join(SRC_CACHE, d['filename']))
else:
print("Using cached download")
print("Unpacking %s..." % package)
unpack(join(SRC_CACHE, d['filename']), tempdir)
print("done")
print("working in %s" % tempdir)
src_dir = get_dir(tempdir)
run_setuppy(src_dir, tempdir, args)
with open(join(tempdir, 'pkginfo.yaml')) as fn:
pkginfo = yaml.load(fn)
setuptools_build = pkginfo['setuptools']
setuptools_run = False
# Look at the entry_points and construct console_script and
# gui_scripts entry_points for conda
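                # Illustrative example (not from any real package): an
                # entry_points string such as
                #     [console_scripts]
                #     mytool = mypkg.cli:main
                # is parsed below into
                #     {'console_scripts': ['mytool=mypkg.cli:main']}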
entry_points = pkginfo['entry_points']
if entry_points:
if isinstance(entry_points, str):
# makes sure it is left-shifted
newstr = "\n".join(x.strip()
for x in entry_points.split('\n'))
config = configparser.ConfigParser()
entry_points = {}
try:
config.readfp(StringIO(newstr))
except Exception as err:
print("WARNING: entry-points not understood: ",
err)
print("The string was", newstr)
entry_points = pkginfo['entry_points']
else:
setuptools_run = True
for section in config.sections():
if section in ['console_scripts', 'gui_scripts']:
value = ['%s=%s' % (option, config.get(section, option))
for option in config.options(section)]
entry_points[section] = value
if not isinstance(entry_points, dict):
print("WARNING: Could not add entry points. They were:")
print(entry_points)
else:
cs = entry_points.get('console_scripts', [])
gs = entry_points.get('gui_scripts', [])
# We have *other* kinds of entry-points so we need
# setuptools at run-time
if not cs and not gs and len(entry_points) > 1:
setuptools_build = True
setuptools_run = True
entry_list = (
cs
# TODO: Use pythonw for these
+ gs)
if len(cs + gs) != 0:
d['entry_points'] = indent.join([''] + entry_list)
d['entry_comment'] = ''
d['build_comment'] = ''
d['test_commands'] = indent.join([''] + make_entry_tests(entry_list))
# Extract requested extra feature requirements...
if args.all_extras:
extras_require = list(pkginfo['extras_require'].values())
else:
try:
extras_require = [pkginfo['extras_require'][x] for x in extras]
except KeyError:
sys.exit("Error: Invalid extra features: [%s]"
% ','.join(extras))
#... and collect all needed requirement specs in a single list:
requires = []
for specs in [pkginfo['install_requires']] + extras_require:
if isinstance(specs, string_types):
requires.append(specs)
else:
requires.extend(specs)
if requires or setuptools_build or setuptools_run:
deps = []
for deptext in requires:
# Every item may be a single requirement
# or a multiline requirements string...
for dep in deptext.split('\n'):
#... and may also contain comments...
dep = dep.split('#')[0].strip()
if dep: #... and empty (or comment only) lines
spec = spec_from_line(dep)
if spec is None:
sys.exit("Error: Could not parse: %s" % dep)
deps.append(spec)
if 'setuptools' in deps:
setuptools_build = False
setuptools_run = False
d['egg_comment'] = ''
d['build_comment'] = ''
d['build_depends'] = indent.join([''] +
['setuptools'] * setuptools_build +
deps)
d['run_depends'] = indent.join([''] +
['setuptools'] * setuptools_run +
deps)
if args.recursive:
for dep in deps:
dep = dep.split()[0]
if not exists(join(output_dir, dep)):
args.packages.append(dep)
if pkginfo['packages']:
deps = set(pkginfo['packages'])
if d['import_tests']:
olddeps = [x for x in d['import_tests'].split()
if x != '-']
deps = set(olddeps) | deps
d['import_tests'] = indent.join([''] + sorted(deps))
d['import_comment'] = ''
finally:
rm_rf(tempdir)
for package in package_dicts:
d = package_dicts[package]
makedirs(join(output_dir, package.lower()))
print("Writing recipe for %s" % package.lower())
with open(join(output_dir, package.lower(), 'meta.yaml'), 'w') as f:
f.write(PYPI_META.format(**d))
with open(join(output_dir, package.lower(), 'build.sh'), 'w') as f:
f.write(PYPI_BUILD_SH.format(**d))
with open(join(output_dir, package.lower(), 'bld.bat'), 'w') as f:
f.write(PYPI_BLD_BAT.format(**d))
print("Done")
def valid(name):
if (re.match("[_A-Za-z][_a-zA-Z0-9]*$", name)
and not keyword.iskeyword(name)):
return name
else:
return ''
def unpack(src_path, tempdir):
if src_path.endswith(('.tar.gz', '.tar.bz2', '.tgz', '.tar.xz', '.tar')):
tar_xf(src_path, tempdir)
elif src_path.endswith('.zip'):
unzip(src_path, tempdir)
else:
raise Exception("not a valid source")
def get_dir(tempdir):
lst = [fn for fn in listdir(tempdir) if not fn.startswith('.') and
isdir(join(tempdir, fn))]
if len(lst) == 1:
dir_path = join(tempdir, lst[0])
if isdir(dir_path):
return dir_path
raise Exception("could not find unpacked source dir")
def run_setuppy(src_dir, temp_dir, args):
'''
Patch distutils and then run setup.py in a subprocess.
:param src_dir: Directory containing the source code
:type src_dir: str
    :param temp_dir: Temporary directory used for storing pkginfo.yaml
:type temp_dir: str
'''
# Do everything in the build env in case the setup.py install goes
# haywire.
# TODO: Try with another version of Python if this one fails. Some
# packages are Python 2 or Python 3 only.
create_env(config.build_prefix, ['python %s*' % args.python_version, 'pyyaml',
'setuptools', 'numpy'], clear_cache=False)
stdlib_dir = join(config.build_prefix, 'Lib' if sys.platform == 'win32' else
'lib/python%s' % args.python_version)
patch = join(temp_dir, 'pypi-distutils.patch')
with open(patch, 'w') as f:
f.write(DISTUTILS_PATCH.format(temp_dir.replace('\\','\\\\')))
if exists(join(stdlib_dir, 'distutils', 'core.py-copy')):
rm_rf(join(stdlib_dir, 'distutils', 'core.py'))
copy2(join(stdlib_dir, 'distutils', 'core.py-copy'), join(stdlib_dir, 'distutils', 'core.py'))
# Avoid race conditions. Invalidate the cache.
if PY3:
rm_rf(join(stdlib_dir, 'distutils', '__pycache__',
'core.cpython-%s%s.pyc' % sys.version_info[:2]))
rm_rf(join(stdlib_dir, 'distutils', '__pycache__',
'core.cpython-%s%s.pyo' % sys.version_info[:2]))
else:
rm_rf(join(stdlib_dir, 'distutils', 'core.pyc'))
rm_rf(join(stdlib_dir, 'distutils', 'core.pyo'))
else:
copy2(join(stdlib_dir, 'distutils', 'core.py'), join(stdlib_dir,
'distutils', 'core.py-copy'))
apply_patch(join(stdlib_dir, 'distutils'), patch)
# Save PYTHONPATH for later
env = os.environ.copy()
if 'PYTHONPATH' in env:
env[str('PYTHONPATH')] = str(src_dir + ':' + env['PYTHONPATH'])
else:
env[str('PYTHONPATH')] = str(src_dir)
cwd = getcwd()
chdir(src_dir)
args = [config.build_python, 'setup.py', 'install']
try:
subprocess.check_call(args, env=env)
except subprocess.CalledProcessError:
print('$PYTHONPATH = %s' % env['PYTHONPATH'])
sys.exit('Error: command failed: %s' % ' '.join(args))
finally:
chdir(cwd)
def make_entry_tests(entry_list):
tests = []
for entry_point in entry_list:
entry = entry_point.partition('=')[0].strip()
tests.append(entry + " --help")
return tests
| {
"content_hash": "7047f92cec04ea3b6deea41d3876a451",
"timestamp": "",
"source": "github",
"line_count": 620,
"max_line_length": 107,
"avg_line_length": 39.30483870967742,
"alnum_prop": 0.5250523205712175,
"repo_name": "tacaswell/conda-build",
"id": "073427205e38d20426d320f80c37c61413ef4e0a",
"size": "24369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_build/pypi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Copyright 2015-2020 Knights Lab, Regents of the University of Minnesota.
This software is released under the GNU Affero General Public License (AGPL) v3.0 License.
"""
import unittest
import tempfile
import os
import pkg_resources
from shogun.parsers.sam import yield_alignments_from_sam_inf
class TestParsers(unittest.TestCase):
def setUp(self):
prefix = 'shogun-temp-dir-'
self.temp_dir = tempfile.TemporaryDirectory(prefix=prefix)
def tearDown(self):
self.temp_dir.cleanup()
def test_yield_alignments_from_samfile(self):
inf_sam = pkg_resources.resource_filename('shogun.tests', os.path.join('data', 'results', 'bowtie2_results.sam'))
gen = yield_alignments_from_sam_inf(inf_sam)
i = len([1 for record in enumerate(gen)])
assert i == 190
| {
"content_hash": "a2fa79964b81a3df08f6daafb7c1009b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 121,
"avg_line_length": 30.37037037037037,
"alnum_prop": 0.7012195121951219,
"repo_name": "knights-lab/NINJA-SHOGUN",
"id": "7b6f59a77702e7300a434d980796ecfcda67b889",
"size": "820",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shogun/parsers/tests/test_parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45010"
}
],
"symlink_target": ""
} |
import sys
from hippy.objects.base import W_Object
from hippy.objects.support import _new_binop
from hippy.consts import BINOP_LIST, BINOP_COMPARISON_LIST
from rpython.rlib.rarithmetic import intmask, ovfcheck
from rpython.rlib.rfloat import isnan, isinf, double_to_string, DTSF_CUT_EXP_0
MAX_PRECISION = 500
class W_FloatObject(W_Object):
_immutable_fields_ = ['floatval']
supports_arithmetics = True
def __init__(self, floatval):
assert isinstance(floatval, float)
self.floatval = floatval
def __eq__(self, other):
""" For testing """
return (isinstance(other, W_FloatObject) and
self.floatval == other.floatval)
def _truncate(self, space):
try:
intval = ovfcheck(int(self.floatval))
except OverflowError:
intval = 0
return space.newint(intval)
def as_number(self, space):
return self
def str(self, space, quiet=False):
prec = space.ec.interpreter.config.get_precision() or 10
# Zend does that:
if prec > MAX_PRECISION:
prec = MAX_PRECISION
return self._repr(prec)
def repr(self):
return self._repr()
def _repr(self, prec=14):
_str, _ = double_to_string(self.floatval, "G", prec, DTSF_CUT_EXP_0)
if 'E' in _str and '.' not in _str:
a, b = _str.split('E')
return a + '.0E' + b
return _str
def dump(self):
return str(self.floatval)
def int_w(self, space):
if isnan(self.floatval):
result = -sys.maxint - 1
space.ec.hippy_warn("cast float to integer: NaN"
" is returned as %d" % result)
return result
try:
result = intmask(int(self.floatval))
except OverflowError:
result = 0 # +/- infinity
if abs(result - self.floatval) > 1.0:
space.ec.hippy_warn("cast float to integer: value %s overflows"
" and is returned as %d"
% (self.str(space), result))
return result
def float_w(self, space):
return self.floatval
def is_true(self, space):
return self.floatval != 0.0
def uplus(self, space):
return self
def uminus(self, space):
return space.newfloat(-self.floatval)
def uplusplus(self, space):
return space.newfloat(self.floatval + 1)
def uminusminus(self, space):
return space.newfloat(self.floatval - 1)
def bitwise_not(self, space):
return space.newint(~self.int_w(space))
def div(self, space, w_other):
assert isinstance(w_other, W_FloatObject)
x = self.floatval
y = w_other.floatval
if y == 0.:
space.ec.warn("Division by zero")
return space.w_False
return W_FloatObject(x / y)
def __repr__(self):
return 'W_FloatObject(%r)' % self.floatval
def var_dump(self, space, indent, recursion):
if isinf(self.floatval):
inf = "%s" % self.floatval
return "%sfloat(%s)\n" % (indent, inf.upper())
if isnan(self.floatval):
return "%sfloat(NAN)\n" % (indent,)
return "%sfloat(%s)\n" % (indent, self.str(space))
def var_export(self, space, indent, recursion, suffix):
if isinf(self.floatval):
inf = "%s" % self.floatval
return "%s" % inf.upper()
if isnan(self.floatval):
return "NAN"
out = "%s%s%s" % (indent, self.str(space), suffix)
return out
def abs(self, space):
return W_FloatObject(abs(self.floatval))
def overflow_convert(self, space):
return space.wrap(self.float_w(space))
def eval_static(self, space):
return self
def serialize(self, space, builder, memo):
prec = memo.serialize_precision
if prec == 0:
prec = (space.int_w(
space.ec.interpreter.config.get_ini_w('serialize_precision'))
or 17)
memo.serialize_precision = prec
builder.append("d:")
builder.append(self._repr(prec=prec))
builder.append(";")
return True
for _name in BINOP_LIST:
if hasattr(W_FloatObject, _name):
continue
setattr(W_FloatObject, _name, _new_binop(W_FloatObject, _name,
'floatval',
_name in BINOP_COMPARISON_LIST))
| {
"content_hash": "3be88ac98e14e4829e5db51c515449f2",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 78,
"avg_line_length": 30.44295302013423,
"alnum_prop": 0.5582010582010583,
"repo_name": "xhava/hippyvm",
"id": "6825523cf209f7f28ef3954230a73a3f8a252f35",
"size": "4536",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hippy/objects/floatobject.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1559"
},
{
"name": "C",
"bytes": "2544055"
},
{
"name": "C++",
"bytes": "255972"
},
{
"name": "HTML",
"bytes": "415"
},
{
"name": "JavaScript",
"bytes": "453641"
},
{
"name": "Makefile",
"bytes": "4793"
},
{
"name": "PHP",
"bytes": "15041037"
},
{
"name": "Python",
"bytes": "2503719"
},
{
"name": "Shell",
"bytes": "15527"
}
],
"symlink_target": ""
} |
import logging
log = logging.getLogger(__name__)
import pandas as pd
from atom.api import (Typed, set_default, observe, Enum, Event, Property,
Bool, Dict, Unicode, Atom, List, Value)
from enaml.core.declarative import d_, d_func
from enaml.widgets.api import RawWidget
from enaml.qt.QtCore import QAbstractTableModel, QModelIndex, Qt
from enaml.qt.QtWidgets import QTableView, QHeaderView, QAbstractItemView
from enaml.qt.QtGui import QColor
from .event_filter import EventFilter
class QEditableTableModel(QAbstractTableModel):
def __init__(self, interface, **kw):
self.interface = interface
super().__init__(**kw)
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return int(Qt.AlignHCenter | Qt.AlignVCenter)
return int(Qt.AlignRight | Qt.AlignVCenter)
elif role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return str(self.interface.get_column_label(section))
else:
return str(self.interface.get_row_label(section))
def flags(self, index):
flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable
if self.interface.editable:
flags = flags | Qt.ItemIsEditable
return flags
def data(self, index, role):
if not index.isValid():
return
if role == Qt.BackgroundRole:
r = index.row()
c = index.column()
color_name = self.interface.get_cell_color(r, c)
color = QColor()
color.setNamedColor(color_name)
return color
elif role in (Qt.DisplayRole, Qt.EditRole):
r = index.row()
c = index.column()
return self.interface._get_data(r, c)
def setData(self, index, value, role):
with self.interface.live_edit:
r = index.row()
c = index.column()
try:
self.interface._set_data(r, c, value)
except:
raise
self.dataChanged.emit(index, index)
return True
def removeRows(self, row, count, index):
with self.interface.live_edit:
self.beginRemoveRows(index, row, row)
self.interface._remove_row(row)
self.endRemoveRows()
return True
def insertRows(self, row, count, index):
with self.interface.live_edit:
self.beginInsertRows(index, row, row)
self.interface._insert_row(row)
self.endInsertRows()
return True
def columnCount(self, index=QModelIndex()):
if self.interface is None:
return 0
return len(self.interface.get_columns())
def rowCount(self, index=QModelIndex()):
if self.interface is None:
return 0
return len(self.interface.get_rows())
def sort(self, column_index, order):
ascending = order == Qt.AscendingOrder
self.interface.sort_rows(column_index, ascending)
class QEditableTableView(QTableView):
def __init__(self, model, parent=None, **kwds):
super().__init__(parent=parent, **kwds)
self.model = model
self.setModel(model)
self._setup_hheader()
self._setup_vheader()
self.setVerticalScrollMode(QAbstractItemView.ScrollPerItem)
self.setHorizontalScrollMode(QAbstractItemView.ScrollPerItem)
self._set_default_column_widths()
select_mode = self.model.interface.select_mode
select_behavior = self.model.interface.select_behavior
if select_mode is None:
self.setSelectionMode(self.NoSelection)
else:
flag_name = '{}Selection'.format(select_mode.capitalize())
self.setSelectionMode(getattr(self, flag_name))
flag_name = 'Select{}'.format(select_behavior.capitalize())
self.setSelectionBehavior(getattr(self, flag_name))
self.selectionModel().selectionChanged.connect(self._selection_changed)
self.setShowGrid(self.model.interface.show_grid)
def _selection_changed(self, selected, deselected):
locations = []
selection_model = self.selectionModel()
for index in selection_model.selectedIndexes():
locations.append((index.row(), index.column()))
self.model.interface.selected_coords = locations
def _set_default_column_widths(self):
widths = self.model.interface.get_default_column_widths()
self.set_column_widths(widths)
def _setup_vheader(self):
header = self.verticalHeader()
header.setSectionResizeMode(QHeaderView.Fixed)
header.setDefaultSectionSize(20)
header.setSectionsMovable(False)
if not self.model.interface.show_row_labels:
header.setVisible(False)
def _setup_hheader(self):
header = self.horizontalHeader()
header.setSectionsMovable(self.model.interface.columns_movable)
if not self.model.interface.show_column_labels:
header.setVisible(False)
if self.model.interface.stretch_last_section:
header.setStretchLastSection(True)
resize_mode = self.model.interface.header_resize_mode
if resize_mode == 'contents':
resize_mode = 'ResizeToContents'
else:
resize_mode = resize_mode.capitalize()
log.debug('Setting header resize mode to %s', resize_mode)
header.setSectionResizeMode(getattr(header, resize_mode))
def remove_selected_rows(self):
selection_model = self.selectionModel()
rows = [index.row() for index in selection_model.selectedRows()]
for row in sorted(rows, reverse=True):
self.model.removeRow(row)
def get_selected_rows(self):
return sorted(r for r, c in self.model.interface.selected_coords)
def last_row_current(self):
selected_row = self.currentIndex().row()
return (selected_row + 1) == self.model.rowCount()
def insert_row(self):
rows = self.get_selected_rows()
if len(rows) == 0:
self.model.insertRow(0)
for row in sorted(rows, reverse=True):
self.model.insertRow(row)
def insert_row_at_end(self):
self.model.insertRow(self.model.rowCount())
def get_column_config(self):
        log.debug('Getting column config')
try:
config = {}
columns = self.model.interface.get_columns()
header = self.horizontalHeader()
for i, c in enumerate(columns):
config[c] = {'width': self.columnWidth(i)}
if self.model.interface.columns_movable:
config[c]['visual_index'] = header.visualIndex(i)
return config
except Exception as e:
log.exception(e)
def set_column_config(self, config):
columns = self.model.interface.get_columns()
for i, c in enumerate(columns):
try:
width = config[c]['width']
self.setColumnWidth(i, width)
log.debug('Set column width for %s to %d', c, width)
except KeyError as e:
log.debug('Unable to set column width for %s', c)
if self.model.interface.columns_movable:
header = self.horizontalHeader()
visual_indices = []
for i, c in enumerate(columns):
try:
vi = config[c]['visual_index']
visual_indices.append((vi, i, c))
except KeyError as e:
log.debug('Unable to find visual index for %s', c)
# Since the current visual index of each column will change as we
# rearrange them, we need to figure out which column should appear
# first and put it there, then move to the next column.
for vi, li, c in sorted(visual_indices):
current_vi = header.visualIndex(li)
header.moveSection(current_vi, vi)
log.debug('Set visual index for %s to %d', c, vi)
# CAN DEPRECATE THIS
def get_column_widths(self):
widths = {}
columns = self.model.interface.get_columns()
header = self.horizontalHeader()
for i, c in enumerate(columns):
widths[c] = header.sectionSize(i)
return widths
# CAN DEPRECATE THIS
def set_column_widths(self, widths):
columns = self.model.interface.get_columns()
for i, c in enumerate(columns):
try:
width = widths[c]
self.setColumnWidth(i, width)
except KeyError:
pass
class LiveEdit:
def __init__(self):
self._editing = False
def __enter__(self):
self._editing = True
def __exit__(self, type, value, traceback):
self._editing = False
def __bool__(self):
return self._editing
class EditableTable(RawWidget):
# Expand the table by default
hug_width = set_default('weak')
hug_height = set_default('weak')
model = Typed(QEditableTableModel)
#: Instance of QEditableTableView
view = Typed(QEditableTableView)
event_filter = Typed(EventFilter)
editable = d_(Bool(False))
autoscroll = d_(Bool(False))
show_column_labels = d_(Bool(True))
show_row_labels = d_(Bool(True))
show_grid = d_(Bool(True))
#: Dictionary mapping column name to a dictionary of settings for that
#: column. Valid keys for each setting include:
#: * compact_label - Column label (preferred).
#: * label - Column label (used if compact_label not provided).
#: * default - Value used for adding rows.
#: * coerce - Function to coerce text entered in column to correct value.
#: * initial_width - Initial width to set column to.
column_info = d_(Dict(Unicode(), Typed(object), {}))
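    #: For illustration only (not part of the original widget), a hypothetical
    #: column_info entry might look like:
    #:     {'frequency': {'compact_label': 'Freq. (Hz)',
    #:                    'default': 1000,
    #:                    'coerce': float,
    #:                    'initial_width': 80}}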
#: Widths of columns in table
column_widths = Property()
#: Dictionary mapping column name to a dictionary of column properties:
#: * visual_index: Visual position of column in table
#: * width: Width of column in table
column_config = Property()
#: Can columns be rearranged by dragging labels in the header?
columns_movable = d_(Bool(True))
data = d_(Typed(object))
update = d_(Bool())
updated = d_(Event())
# List of row, col tuples of selections
selected_coords = d_(List(), [])
live_edit = Typed(LiveEdit, {})
select_behavior = d_(Enum('items', 'rows', 'columns'))
select_mode = d_(Enum('single', 'contiguous', 'extended', 'multi', None))
    #: Stretch the width of the last column so it fills the rest of the table?
stretch_last_section = d_(Bool(True))
#: How can column headers be resized?
header_resize_mode = d_(Enum('interactive', 'fixed', 'stretch',
'contents'))
def get_column_attribute(self, column_name, attribute, default,
raise_error=False):
column = self.column_info.get(column_name, {})
try:
return column[attribute]
except (KeyError, TypeError):
try:
return getattr(column, attribute)
except AttributeError:
if raise_error:
raise
else:
return default
@d_func
def get_cell_color(self, row_index, column_index):
'''
Parameters
----------
row_index : int
Row index (zero-based)
column_index : int
Column index (zero-based)
Result
------
color : SVG color name or hex color code
Color to use for the background cell. Defaults to white. See
http://www.december.com/html/spec/colorsvg.html for SVG color
names.
'''
        # Given the row and column index, this must return one of the SVG
        # color names or a hex color code (see the docstring above).
return 'white'
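    # Illustrative only: a subclass or enaml declaration could override
    # get_cell_color to highlight cells, e.g. for a hypothetical 'status' column:
    #     def get_cell_color(self, row_index, column_index):
    #         if self.get_columns()[column_index] == 'status':
    #             return 'lightyellow'
    #         return 'white'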
@d_func
def get_row_label(self, row_index):
'''
Parameters
----------
row_index : int
Row index (zero-based)
Result
------
label : str
            Label to use for the row header. Defaults to a 1-based row number.
'''
return str(row_index+1)
@d_func
def get_column_label(self, column_index):
'''
Parameters
----------
column_index : int
Column index (zero-based)
Result
------
label : str
            Label to use for the column header. Defaults to the 'compact_label' key in
'column_info'. If 'compact_label' is not found, checks for the
'label' key.
'''
column = self.get_columns()[column_index]
try:
return self.get_column_attribute(column, 'compact_label', column,
raise_error=True)
except AttributeError:
return self.get_column_attribute(column, 'label', column)
@d_func
def get_rows(self):
if self.data is None:
return []
return range(len(self.data))
@d_func
def get_columns(self):
'''
Result
------
column_labels : list of str
List of column labels.
'''
raise NotImplementedError
@d_func
def get_data(self, row_index, column_index):
'''
Parameters
----------
row_index : int
Row index (zero-based)
column_index : int
Column index (zero-based)
Result
------
data : object
Data to be shown in cell.
'''
raise NotImplementedError
@d_func
def set_data(self, row_index, column_index, value):
'''
Save value at specified row and column index to data
Parameters
----------
row_index : int
Row index (zero-based)
column_index : int
Column index (zero-based)
value : object
'''
raise NotImplementedError
@d_func
def remove_row(self, row_index):
raise NotImplementedError
@d_func
def insert_row(self, row=None):
raise NotImplementedError
def _get_data(self, row_index, column_index):
try:
value = self.get_data(row_index, column_index)
column = self.get_columns()[column_index]
formatter = self.column_info.get(column, {}).get('to_string', str)
return formatter(value)
except Exception as e:
log.warning(e)
return ''
def _set_data(self, *args):
self.set_data(*args)
self.updated = True
def _remove_row(self, *args):
self.remove_row(*args)
self.updated = True
def _insert_row(self, *args):
self.insert_row(*args)
self.updated = True
@d_func
def sort_rows(self, column_index, ascending):
raise NotImplementedError
@d_func
def get_default_row(self):
values = []
for column in self.get_columns():
default = self.column_info.get(column, {}).get('default', None)
values.append(default)
return values
@d_func
def coerce_to_type(self, column_index, value):
column = self.get_columns()[column_index]
func = self.column_info.get(column, {}).get('coerce', lambda x: x)
return func(value)
def create_widget(self, parent):
self.model = QEditableTableModel(self)
self.view = QEditableTableView(self.model, parent=parent)
if self.editable:
self.event_filter = EventFilter(self.view)
self.view.installEventFilter(self.event_filter)
return self.view
def _observe_data(self, event):
# TODO: for lists does not reset if the values are equivalent. We then
# lose a reference to the actual list.
self._reset_model()
def _observe_columns(self, event):
self._reset_model()
def _observe_column_info(self, event):
self._reset_model()
def _observe_update(self, event):
if self.update:
self._reset_model()
self.update = False
def _reset_model(self, event=None):
# Forces a reset of the model and view
self.model.beginResetModel()
self.model.endResetModel()
if self.autoscroll and self.view:
self.view.scrollToBottom()
def _get_column_widths(self):
return self.view.get_column_widths()
def _set_column_widths(self, widths):
self.view.set_column_widths(widths)
self._reset_model()
def get_default_column_widths(self):
return {c: self.get_column_attribute(c, 'initial_width', 100) \
for c in self.get_columns()}
def _get_column_config(self):
return self.view.get_column_config()
def _set_column_config(self, config):
self.view.set_column_config(config)
self._reset_model()
def get_visual_columns(self):
if not self.columns_movable:
return self.get_columns()
config = self.column_config
indices = [(cfg['visual_index'], c) for c, cfg in config.items()]
indices.sort()
return [i[1] for i in indices]
def as_string(self):
rows = self.get_rows()
visual_cols = self.get_visual_columns()
cols = self.get_columns()
table_strings = []
for r in range(len(rows)):
row_data = []
for v in visual_cols:
c = cols.index(v)
row_data.append(self.get_data(r, c))
row_string = '\t'.join(str(d) for d in row_data)
table_strings.append(row_string)
return '\n'.join(table_strings)
class DataFrameTable(EditableTable):
data = d_(Typed(pd.DataFrame))
columns = d_(Typed(object))
def _observe_columns(self, event):
self._reset_model()
@d_func
def get_columns(self):
if self.columns is not None:
return self.columns
if self.data is None:
return []
return self.data.columns
def get_data(self, row_index, column_index):
row_label = self.data.index[row_index]
column_label = self.get_columns()[column_index]
return self.data.at[row_label, column_label]
def set_data(self, row_index, column_index, value):
value = self.coerce_to_type(column_index, value)
row_label = self.data.index[row_index]
column_label = self.get_columns()[column_index]
self.data.at[row_label, column_label] = value
def remove_row(self, row_index):
label = self.data.index[row_index]
self.data.drop(label, inplace=True)
self.data.index = range(len(self.data))
def insert_row(self, row_index):
values = self.get_default_row()
self.data.loc[row_index + 0.5] = values
self.data.sort_index(inplace=True)
self.data.index = range(len(self.data))
class ListDictTable(EditableTable):
#: List of dictionaries where list index maps to row and dictionary key
#: maps to column.
data = d_(List())
#: List of column names. If not provided, defaults to dictionary keys
#: provided by the first entry in `data`.
columns = d_(List())
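    #: For illustration only, a hypothetical two-row table could be described as:
    #:     data = [{'name': 'tone', 'level': 60},
    #:             {'name': 'noise', 'level': 40}]
    #:     columns = ['name', 'level']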
def get_columns(self):
if self.columns:
return self.columns
if (self.data is not None) and (len(self.data) != 0):
return list(self.data[0].keys())
return []
def get_data(self, row_index, column_index):
column = self.get_columns()[column_index]
return self.data[row_index][column]
def set_data(self, row_index, column_index, value):
value = self.coerce_to_type(column_index, value)
column = self.get_columns()[column_index]
self.data[row_index][column] = value
def get_default_row(self):
values = super().get_default_row()
keys = self.get_columns()
return dict(zip(keys, values))
def insert_row(self, row_index):
values = self.get_default_row()
self.data.insert(row_index+1, values)
def remove_row(self, row_index):
self.data.pop(row_index)
class ListTable(EditableTable):
data = d_(List())
column_name = d_(Unicode())
selected = d_(List())
show_column_labels = True
stretch_last_section = True
def get_columns(self):
return [self.column_name]
def get_data(self, row_index, column_index):
return self.data[row_index]
def set_data(self, row_index, column_index, value):
value = self.coerce_to_type(column_index, value)
self.data[row_index] = value
def get_default_row(self):
values = super().get_default_row()
return values[0]
def insert_row(self, row_index):
value = self.get_default_row()
self.data.insert(row_index+1, value)
def remove_row(self, row_index):
self.data.pop(row_index)
def _observe_selected_coords(self, event):
self.selected = [self.data[r] for r, c in self.selected_coords]
| {
"content_hash": "e4984e995209f864b564e56b9d887a49",
"timestamp": "",
"source": "github",
"line_count": 661,
"max_line_length": 79,
"avg_line_length": 32.127080181543114,
"alnum_prop": 0.5861273309474477,
"repo_name": "bburan/psiexperiment",
"id": "479cf772e8280708dd6cf514130dc5430af9719e",
"size": "21452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psi/core/enaml/editable_table_widget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "468917"
}
],
"symlink_target": ""
} |
"""
dj-stripe Exceptions.
"""
class MultipleSubscriptionException(Exception):
"""Raised when a Customer has multiple Subscriptions and only one is expected."""
pass
class StripeObjectManipulationException(Exception):
"""Raised when an attempt to manipulate a non-standalone stripe object is made not through its parent object."""
pass
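# Illustrative usage only (not part of this module); callers typically catch
# these, e.g. around a hypothetical single-subscription accessor:
#     try:
#         subscription = customer.subscription
#     except MultipleSubscriptionException:
#         ...  # decide which of the customer's subscriptions to use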
| {
"content_hash": "1edcd4ee07241456265bc9539af963cd",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 113,
"avg_line_length": 23,
"alnum_prop": 0.7739130434782608,
"repo_name": "kavdev/dj-stripe",
"id": "602e727811032854f4150e781cf98086871531ae",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djstripe/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6190"
},
{
"name": "Python",
"bytes": "619260"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os.path
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from app_manifest.manifest import (get_app_manifest_from_file,
get_app_manifest_from_string,
get_app_manifest_from_settings)
TEST_STRING = """<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="escape-me" content="web%22%2C%22">
<link rel="stylesheet" href="styles/026d5f65.main.css">
</head>
<body>
<script src="scripts/f11d3906.components.js"></script>
<script src="scripts/2ac77420.templates.js"></script>
<script src="scripts/6747c2ff.main.js"></script>
<script src="scripts/d41d8cd9.plugins.js"></script>
</body>
</html>
"""
TEST_STRING_CSS = [
'styles/026d5f65.main.css'
]
TEST_STRING_META = [
{'charset': 'utf-8'},
{'http-equiv': 'X-UA-Compatible', 'content': 'IE=edge'},
{'name': 'viewport', 'content': 'width=device-width, initial-scale=1'},
{'name': 'escape-me', 'content': 'web%22%2C%22'}
]
TEST_STRING_SCRIPTS = [
'scripts/f11d3906.components.js',
'scripts/2ac77420.templates.js',
'scripts/6747c2ff.main.js',
'scripts/d41d8cd9.plugins.js'
]
TEST_FILE_INDEX_CSS = [
'styles/index.main.css'
]
TEST_FILE_INDEX_META = [
{'charset': 'utf-8'},
{'http-equiv': 'X-UA-Compatible', 'content': 'IE=edge'},
{'name': 'viewport', 'content': 'width=device-width, initial-scale=1'},
{'name': 'escape-me', 'content': 'web%22%2C%22'}
]
TEST_FILE_INDEX_SCRIPTS = [
'scripts/index.components.js',
'scripts/index.templates.js',
'scripts/index.main.js',
'scripts/index.plugins.js'
]
@override_settings(STATIC_ROOT=os.path.join(os.path.dirname(__file__), 'manifestfiles'))
class ManifestTest(TestCase):
def test_from_string(self):
"""Test that the index view renders and uses the correct templates."""
result = get_app_manifest_from_string(TEST_STRING)
self.assertEqual(result.cssfiles, TEST_STRING_CSS)
self.assertEqual(result.meta, TEST_STRING_META)
self.assertEqual(result.scriptfiles, TEST_STRING_SCRIPTS)
def test_from_file(self):
"""Getting from a file works correctly."""
result = get_app_manifest_from_file('index.html')
self.assertEqual(result.cssfiles, TEST_FILE_INDEX_CSS)
self.assertEqual(result.meta, TEST_FILE_INDEX_META)
self.assertEqual(result.scriptfiles, TEST_FILE_INDEX_SCRIPTS)
def test_file_not_found(self):
"""Getting from a non-existing file throws an IOError"""
with self.assertRaises(IOError):
get_app_manifest_from_file("filenotfound.html")
@override_settings(APP_MANIFEST_PATH='index.html')
def test_from_settings(self):
"""Getting from a settings path works similarly to files"""
result = get_app_manifest_from_settings()
self.assertEqual(result.cssfiles, TEST_FILE_INDEX_CSS)
self.assertEqual(result.meta, TEST_FILE_INDEX_META)
self.assertEqual(result.scriptfiles, TEST_FILE_INDEX_SCRIPTS)
@override_settings(APP_MANIFEST_PATH=None)
def test_from_settings_disabled(self):
"""Not specifying an app manifest returns an empty manifest"""
result = get_app_manifest_from_settings()
self.assertEqual(result.cssfiles, [])
self.assertEqual(result.scriptfiles, [])
@override_settings(APP_MANIFEST_PATH='filenotfound.html')
def test_from_invalid_settings(self):
with self.assertRaises(ImproperlyConfigured):
get_app_manifest_from_settings()
| {
"content_hash": "04bdbe4fd53b7293ddc7befe19ccaa21",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 88,
"avg_line_length": 35.71296296296296,
"alnum_prop": 0.6556909515167229,
"repo_name": "JoltLabs/app-manifest",
"id": "78cfe342fbfa89fa02f4198688e04ea3fef010ad",
"size": "3857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_manifest/tests/manifest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13533"
}
],
"symlink_target": ""
} |
import os
import jinja2
import sys
import argparse
import pkg_resources
import concurrent.futures
import operator
import re
import shutil
import collections
import stat
import lamnfyc.settings
import lamnfyc.utils
from lamnfyc.logger import (log, start_file_log)
__version__ = pkg_resources.get_distribution('lamnfyc').version
class ArgumentParser(argparse.ArgumentParser):
def parse_known_args(self, *_args, **kwargs):
args, namespace = super(ArgumentParser, self).parse_known_args(*_args, **kwargs)
if args.init:
if os.path.exists(self.config_default):
raise self.error('File {} already exists.'.format(self.config_default))
shutil.copyfile(os.path.join(lamnfyc.settings.BASE_PATH, self.default_name), self.config_default)
sys.exit(0)
elif not args.environment:
self.error("the environment name is required")
if not os.path.exists(args.config):
raise self.error('{} does not exist'.format(args.config))
return args, namespace
def parser():
parser = ArgumentParser(description='LAMNFYC. v.{}'.format(__version__))
parser.default_name = default_name = 'lamnfyc.yaml'
parser.config_default = config_default = os.path.join(os.getcwd(), default_name)
parser.add_argument('-c', '--config', default=config_default,
help='path to the config file, [default: {}]'.format(config_default))
parser.add_argument('environment', nargs='?', help='path to the environment')
parser.add_argument('--init', action='store_true',
help='creates a {} file inside your current working directory'.format(default_name))
parser.add_argument('--prompt-all', action='store_true', default=False,
help='prompt me for every option, don\'t default anything')
parser.add_argument('--reuse', action='store_true', default=False, help=argparse.SUPPRESS)
parser.add_argument('--version', action='version', version='%(prog)s (version {})'.format(__version__))
parser.add_argument(
'-v', '--verbosity', action='store', dest='verbosity', default=20,
type=int, choices=[10, 20, 0],
help='Verbosity level; 0=normal output, 10=DEBUG, 20=INFO',
)
return parser
def main():
args, _ = parser().parse_known_args()
return _main(args)
def _main(args):
environment_config = lamnfyc.utils.Configuration(args.config)
# need the absolute path to the environment
lamnfyc.settings.environment_path = os.path.abspath(os.path.join(os.path.abspath(os.path.curdir),
args.environment).rstrip('/'))
# sets up the system path local to where the yaml file is so you can import the pre/post hooks
sys.path.insert(0, os.path.dirname(os.path.abspath(args.config)))
# set the logging level to console only
log.handlers[0].setLevel(args.verbosity)
    # create the cache dir if it's missing
if not os.path.isdir(lamnfyc.settings.CACHE_PATH):
os.mkdir(lamnfyc.settings.CACHE_PATH)
if not args.reuse:
log.debug('Starting environment: {}'.format(lamnfyc.settings.environment_path))
# error out if the environment already exists
if os.path.isdir(lamnfyc.settings.environment_path):
log.fatal('ERROR: File already exists and is not a directory.')
log.fatal('Please provide a different path or delete the directory.')
sys.exit(3)
# make sure all the paths exists
os.mkdir(lamnfyc.settings.environment_path)
# Standard unix installation structure
os.mkdir(os.path.join(lamnfyc.settings.environment_path, 'lib'))
os.mkdir(os.path.join(lamnfyc.settings.environment_path, 'bin'))
os.mkdir(os.path.join(lamnfyc.settings.environment_path, 'share'))
os.mkdir(os.path.join(lamnfyc.settings.environment_path, 'include'))
# Appended structure, to house configuration files, logs, and sock/run files
os.mkdir(os.path.join(lamnfyc.settings.environment_path, 'conf'))
os.mkdir(os.path.join(lamnfyc.settings.environment_path, 'logs'))
os.mkdir(os.path.join(lamnfyc.settings.environment_path, 'run'))
else:
log.warn('Reuse mode enabled, this is not fully supported')
# initialize the file level logging
start_file_log(lamnfyc.settings.environment_path)
if environment_config.preinstall_hook:
environment_config.preinstall_hook()
environment_config.prompt_missing(missing_only=not args.prompt_all)
kwargs = {
'environment_path': lamnfyc.settings.environment_path,
'enironment_variables': variable_order(environment_config.env),
'unset_variables': ' '.join(environment_config.env.keys())
}
path = os.path.join(lamnfyc.settings.BASE_PATH, 'templates')
files = [os.path.join(root, file) for root, dir, files in os.walk(path) for file in files]
for file in files:
file_path = os.path.join(lamnfyc.settings.environment_path, file.replace(path + os.path.sep, ''))
with open(file_path, 'w') as file_out:
file_out.write(jinja2.Template(open(file).read()).render(**kwargs))
# If it goes inside /bin then give it exec permissions
if file_path.replace(lamnfyc.settings.environment_path + os.path.sep, '').split(os.path.sep)[0] == 'bin':
os.chmod(file_path, os.stat(file).st_mode | stat.S_IEXEC)
# after all the environment variables have been written, lets read them back up to get nice and clean values
# without any $VARIABLE in them
environment_config.reload_env(lamnfyc.settings.environment_path)
# generate all the packages we need to download
downloads = []
for package_item in environment_config.packages:
package = lamnfyc.utils.import_package(package_item['name'], package_item['version'])
package.environment_vars = environment_config.env
downloads.append(package)
for subpackage in package.dependencies():
downloads.append(subpackage)
subpackage.environment_vars = environment_config.env
# download all the packages that are missing
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
futures = executor.map(lambda package: package.download() if not package.cache_exists else None, downloads)
for future in futures:
# if any of the futures threw an error it will raise them here and thus the program will halt
continue
# Install all packages, uppermost first, meaning;
# If say Package1 depends on Package2 which in turn that depends on Package3, the order or the install will be:
# Package3 gets installed first, then Package2, and lastly Package1
for package_item in environment_config.packages:
package = lamnfyc.utils.import_package(package_item['name'], package_item['version'])
for subpackage in package.dependencies():
subpackage.expand()
package.expand()
if environment_config.postinstall_callback:
environment_config.postinstall_callback()
def variable_order(items):
FIND = re.compile('\$([\w]+)')
ready = collections.OrderedDict()
ready['VIRTUAL_ENV'] = None
ready['USER'] = None
passes = 0
while True:
group = {}
passes += 1
for key, value in items.iteritems():
if key in ready:
continue
if '$' in (str(value) or ''):
groups = FIND.findall(value)
counter = 0
for _key in groups:
if _key in ready:
counter += 1
if counter == len(groups):
group[key] = value
else:
group[key] = value
for key, value in sorted(group.items(), key=operator.itemgetter(0)):
ready[key] = value
yield key, value
if len(items.keys()) == (len(ready.keys()) - 2):
break
elif passes > 10:
raise Exception('Weird nesting going on, could not find dependencies for: {}'.format(
', '.join(set(items.keys()) - set(ready.keys()))
))
| {
"content_hash": "17f165081d91087c1d4ffa400aee0306",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 115,
"avg_line_length": 41.54271356783919,
"alnum_prop": 0.6466674730857627,
"repo_name": "kingbuzzman/lamnfyc",
"id": "99671201f657cdce21d4e78ef4864aea7b887198",
"size": "8267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lamnfyc/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63170"
},
{
"name": "Shell",
"bytes": "4623"
}
],
"symlink_target": ""
} |
import copy
import hashlib
import logging
import re
from urllib.parse import parse_qs
from urllib.parse import urlparse
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.dash import dashparse
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.text import decode_html_entities
class OppetArkiv(Service, OpenGraphThumbMixin):
supported_domains = ["oppetarkiv.se"]
def get(self):
vid = self.find_video_id()
if vid is None:
yield ServiceError("Cant find video id for this video")
return
url = "http://api.svt.se/videoplayer-api/video/{}".format(vid)
data = self.http.request("get", url)
if data.status_code == 404:
yield ServiceError("Can't get the json file for {}".format(url))
return
data = data.json()
if "live" in data:
self.config.set("live", data["live"])
self.outputfilename(data)
if "subtitleReferences" in data:
for i in data["subtitleReferences"]:
if i["format"] == "websrt":
yield subtitle(copy.copy(self.config), "wrst", i["url"], output=self.output)
if i["format"] == "webvtt" and "url" in i:
yield subtitle(copy.copy(self.config), "wrst", i["url"], output=self.output)
if len(data["videoReferences"]) == 0:
yield ServiceError("Media doesn't have any associated videos (yet?)")
return
for i in data["videoReferences"]:
parse = urlparse(i["url"])
query = parse_qs(parse.query)
if i["format"] == "hls" or i["format"] == "ios":
streams = hlsparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output)
for n in list(streams.keys()):
yield streams[n]
if "alt" in query and len(query["alt"]) > 0:
alt = self.http.get(query["alt"][0])
if alt:
streams = hlsparse(self.config, self.http.request("get", alt.request.url), alt.request.url, output=self.output)
for n in list(streams.keys()):
yield streams[n]
if i["format"] == "hds" or i["format"] == "flash":
match = re.search(r"\/se\/secure\/", i["url"])
if not match:
streams = hdsparse(self.config, self.http.request("get", i["url"], params={"hdcore": "3.7.0"}), i["url"], output=self.output)
for n in list(streams.keys()):
yield streams[n]
if "alt" in query and len(query["alt"]) > 0:
alt = self.http.get(query["alt"][0])
if alt:
streams = hdsparse(
self.config,
self.http.request("get", alt.request.url, params={"hdcore": "3.7.0"}),
alt.request.url,
output=self.output,
)
for n in list(streams.keys()):
yield streams[n]
if i["format"] == "dash264" or i["format"] == "dashhbbtv":
streams = dashparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output)
for n in list(streams.keys()):
yield streams[n]
if "alt" in query and len(query["alt"]) > 0:
alt = self.http.get(query["alt"][0])
if alt:
streams = dashparse(self.config, self.http.request("get", alt.request.url), alt.request.url, output=self.output)
for n in list(streams.keys()):
yield streams[n]
def find_video_id(self):
match = re.search('data-video-id="([^"]+)"', self.get_urldata())
if match:
return match.group(1)
return None
def find_all_episodes(self, config):
page = 1
data = self.get_urldata()
match = re.search(r'"/etikett/titel/([^"/]+)', data)
if match is None:
match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
if match is None:
logging.error("Couldn't find title")
return
program = match.group(1)
episodes = []
n = 0
if self.config.get("all_last") > 0:
sort = "tid_fallande"
else:
sort = "tid_stigande"
while True:
url = "http://www.oppetarkiv.se/etikett/titel/{}/?sida={}&sort={}&embed=true".format(program, page, sort)
data = self.http.request("get", url)
if data.status_code == 404:
break
data = data.text
regex = re.compile(r'href="(/video/[^"]+)"')
for match in regex.finditer(data):
if n == self.config.get("all_last"):
break
episodes.append("http://www.oppetarkiv.se{}".format(match.group(1)))
n += 1
page += 1
return episodes
def outputfilename(self, data):
id = hashlib.sha256(data["programVersionId"].encode("utf-8")).hexdigest()[:7]
self.output["id"] = id
datatitle = re.search('data-title="([^"]+)"', self.get_urldata())
if not datatitle:
return None
datat = decode_html_entities(datatitle.group(1))
self.output["title"] = self.name(datat)
self.seasoninfo(datat)
def seasoninfo(self, data):
match = re.search(r"S.song (\d+) - Avsnitt (\d+)", data)
if match:
self.output["season"] = int(match.group(1))
self.output["episode"] = int(match.group(2))
else:
match = re.search(r"Avsnitt (\d+)", data)
if match:
self.output["episode"] = int(match.group(1))
def name(self, data):
if data.find(" - S.song") > 0:
title = data[: data.find(" - S.song")]
else:
if data.find(" - Avsnitt") > 0:
title = data[: data.find(" - Avsnitt")]
else:
title = data
return title
| {
"content_hash": "61b9df08725523c2cfb6d6279054aa6f",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 145,
"avg_line_length": 40.333333333333336,
"alnum_prop": 0.5084175084175084,
"repo_name": "olof/debian-svtplay-dl",
"id": "be4418d052b914e856a667ed7fd8f987d69821b9",
"size": "6623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/svtplay_dl/service/oppetarkiv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "380"
},
{
"name": "Makefile",
"bytes": "2930"
},
{
"name": "Python",
"bytes": "393738"
},
{
"name": "Shell",
"bytes": "2423"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "avocado.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "0969e51b25789fde7092f8dda1102153",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.6206896551724138,
"repo_name": "trdwll/avocado",
"id": "9fa0042f890ec79d61b097e8035a0209e04521df",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "377"
},
{
"name": "HTML",
"bytes": "10437"
},
{
"name": "Python",
"bytes": "9928"
}
],
"symlink_target": ""
} |
import socket
import errno
import six
def server_socket(address, backlog=5):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(address)
server.listen(backlog)
server.setblocking(False)
return server
def client_socket(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
in_blocking = False
try:
sock.connect(address)
except Exception as ex:
if ex.args[0] in [errno.EWOULDBLOCK, errno.EINPROGRESS]:
in_blocking = True
else:
six.raise_from(ex, None)
return (sock, in_blocking)
| {
"content_hash": "2cb11a2abbc33476f2c1a17635f124c8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 64,
"avg_line_length": 24.75,
"alnum_prop": 0.6681096681096681,
"repo_name": "fifman/sockspy",
"id": "9c3a5d06fd91c9887e9a8e539e9249ae01090860",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sockspy/socket_tools/raw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2269"
},
{
"name": "Python",
"bytes": "53026"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('recipe_app', '0002_auto_20161013_0418'),
]
operations = [
migrations.RenameField(
model_name='reciperating',
old_name='recipe_name',
new_name='recipe',
),
]
| {
"content_hash": "bfa5c21caf08dce16671cb11144e7c4f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 50,
"avg_line_length": 20.5,
"alnum_prop": 0.5826558265582655,
"repo_name": "mpollachek/Recipe_website_project",
"id": "c7954b24478f83fc4424ce9f33ee6786393afd4b",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/recipe_app/migrations/0003_auto_20161016_2307.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3054"
},
{
"name": "HTML",
"bytes": "118572"
},
{
"name": "JavaScript",
"bytes": "251294"
},
{
"name": "Python",
"bytes": "37569"
}
],
"symlink_target": ""
} |
answer1 = widget_inputs["check1"]
answer2 = widget_inputs["check2"]
answer3 = widget_inputs["check3"]
answer4 = widget_inputs["check4"]
is_correct = False
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
if answer1 == True:
is_correct = True
else:
is_correct = is_correct and False
commentizer("Check the first answer. If the images have the same resolution with different file sizes, then compression might be correct.")
if answer2 == False:
is_correct = is_correct and True
else:
is_correct = is_correct and False
commentizer("Check the second answer. Are the images the same size on the page? If so, the display resolutions are the same.")
if answer3 == False:
is_correct = is_correct and True
else:
is_correct = is_correct and False
commentizer("Check the third answer. Do the original images have the same resolutions? Open up DevTools to find out.")
if answer4 == False:
is_correct = is_correct and True
else:
is_correct = is_correct and False
commentizer("Check the fourth answer. Are the file types different? Open up DevTools to find out.")
if is_correct:
commentizer("Nice job! Don't forget to compress your images!")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct | {
"content_hash": "d18e91e71fdbc75e1ea59bc1ff841794",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 143,
"avg_line_length": 32.09756097560975,
"alnum_prop": 0.7127659574468085,
"repo_name": "udacity/responsive-images",
"id": "d6a3847fe8faa3f9488df942999caa5dbe6bf994",
"size": "1316",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "grading_scripts/2_19_q.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17651"
},
{
"name": "HTML",
"bytes": "46303"
},
{
"name": "JavaScript",
"bytes": "8515"
},
{
"name": "Python",
"bytes": "10560"
},
{
"name": "Shell",
"bytes": "1395"
}
],
"symlink_target": ""
} |
import copy
import uuid
import io
import itertools
import os
import re
import textwrap
import warnings
import builtins
# THIRD-PARTY
from packaging.version import Version
import numpy as np
# LOCAL
from astropy import log
from astropy.io import fits
from . import docstrings
from . import _wcs
from astropy import units as u
from astropy.utils.compat import possible_filename
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning
from astropy.utils.decorators import deprecated_renamed_argument
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs',
'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm', 'Auxprm',
'Celprm', 'Prjprm', 'Wtbarr', 'WCSBase', 'validate', 'WcsError',
'SingularMatrixError', 'InconsistentAxisTypesError',
'InvalidTransformError', 'InvalidCoordinateError',
'InvalidPrjParametersError', 'NoSolutionError',
'InvalidSubimageSpecificationError', 'NoConvergence',
'NonseparableSubimageCoordinateSystemError',
'NoWcsKeywordsFoundError', 'InvalidTabularParametersError']
__doctest_skip__ = ['WCS.all_world2pix']
if _wcs is not None:
if Version(_wcs.__version__) < Version("5.8"):
raise ImportError(
"astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
"later on the 5.x series are known to work. The version of wcslib "
"that ships with astropy may be used.")
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build "
"on your platform.")
_WCSSUB_TIME_SUPPORT = Version(_wcs.__version__) >= Version("7.8")
_WCS_TPD_WARN_LT71 = Version(_wcs.__version__) < Version("7.1")
_WCS_TPD_WARN_LT74 = Version(_wcs.__version__) < Version("7.4")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Auxprm = _wcs.Auxprm
Celprm = _wcs.Celprm
Prjprm = _wcs.Prjprm
Tabprm = _wcs.Tabprm
Wtbarr = _wcs.Wtbarr
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
InvalidPrjParametersError = _wcs.InvalidPrjParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(('WCSSUB_', 'WCSHDR_', 'WCSHDO_', 'WCSCOMPARE_', 'PRJ_')):
locals()[key] = val
__all__.append(key)
# Set coordinate extraction callback for WCS -TAB:
def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
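        # Pull the coordinate array for this -TAB axis out of the binary table
        # HDU (EXTNAME=extnam, EXTVER=extver), column ``ttype``, 1-based row
        # ``row``, and hand it to wcslib as a contiguous float64 array. A 1-D
        # coordinate vector (kind == 'c') is reshaped to the 2-D layout wcslib
        # expects; any other dimensionality mismatch raises an error.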
arr = hdulist[(extnam, extver)].data[ttype][row - 1]
if arr.ndim != ndim:
if kind == 'c' and ndim == 2:
arr = arr.reshape((arr.size, 1))
else:
raise ValueError("Bad TDIM")
return np.ascontiguousarray(arr, dtype=np.double)
_wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
Wtbarr = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
_WCSSUB_TIME_SUPPORT = False
_WCS_TPD_WARN_LT71 = False
_WCS_TPD_WARN_LT74 = False
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining SIP keywords. It matches a keyword that starts
# with A or B, optionally followed by P, followed by an underscore, then a
# number in the range 0-19, followed by an underscore and another number in
# the range 0-19. The keyword optionally ends with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')
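# For example (illustrative): "A_0_2", "B_1_1", "AP_13_5" and "BP_4_0" match
# this pattern, while "AP_20_1" does not (order indices above 19 are rejected).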
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == 'image':
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == 'binary':
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == 'pixel':
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' " +
"and/or 'pixel'")
else:
keysel_flags = -1
return keysel_flags
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
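    Examples
    --------
    A minimal sketch of recovering the best available solution when this
    exception is raised (``w``, ``ra`` and ``dec`` are assumed to already
    exist)::
        try:
            x, y = w.all_world2pix(ra, dec, 1, maxiter=20)
        except NoConvergence as e:
            x, y = e.best_solution[:, 0], e.best_solution[:, 1]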
"""
def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
divergent=None, slow_conv=None, **kwargs):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn("Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: https://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : `~astropy.io.fits.HDUList`, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of str, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
syntactically valid otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
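    Examples
    --------
    A minimal construction sketch; the file name used here is hypothetical::
        from astropy.io import fits
        from astropy.wcs import WCS
        with fits.open('image.fits') as hdulist:
            w = WCS(hdulist[0].header, hdulist)
        print(w.naxis)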
""" # noqa: E501
def __init__(self, header=None, fobj=None, key=' ', minerr=0.0,
relax=True, naxis=None, keysel=None, colsel=None,
fix=True, translate_units='', _do_set=True):
close_fds = []
# these parameters are stored to be used when unpickling a WCS object:
self._init_kwargs = {
'keysel': copy.copy(keysel),
'colsel': copy.copy(colsel),
}
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = (possible_filename(header) and
os.path.exists(header))
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2")
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object")
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError("'fobj' must be either None or an "
"astropy.io.fits.HDUList object.")
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode('ascii')
tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, warnings=False,
hdulist=fobj)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except _wcs.NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(
header, fobj, dist='CPDIS', err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace('END' + ' ' * 77, '')
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
try:
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, hdulist=fobj)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, hdulist=fobj)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if (wcsprm.naxis != 2 and
(det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)):
raise ValueError(
"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
""".format(wcsprm.naxis))
header_naxis = header.get('NAXIS', None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
"The WCS transformation has more axes ({:d}) than the "
"image it is associated with ({:d})".format(
wcsprm.naxis, header_naxis), FITSFixedWarning)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(new_copy, self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2))
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(new_copy, deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo),
deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo),
deepcopy(self.det2im2, memo)))
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
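        Examples
        --------
        A short sketch (``w`` is assumed to be an existing `WCS`)::
            w2 = w.deepcopy()
            w2.wcs.crpix[0] += 1.0   # does not affect ``w``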
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [cname_uuid.index(cname) if cname in cname_uuid else None
for cname in copy.wcs.cname]
# Restore the original CNAMEs
copy.wcs.cname = ['' if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple([None if i is None else self.pixel_shape[i] for i in keep])
if self.pixel_bounds:
copy.pixel_bounds = [None if i is None else self.pixel_bounds[i] for i in keep]
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn("Removed redundant SCAMP distortion parameters " +
"because SIP parameters are also present", FITSFixedWarning)
def fix(self, translate_units='', naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
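        Examples
        --------
        A short sketch (``w`` is assumed to be an existing `WCS` instance)::
            w.fix(translate_units='shd')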
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (key == 'datfix' and '1858-11-17' in val and
not np.count_nonzero(self.wcs.mjdref)):
continue
warnings.warn(
("'{0}' made the change '{1}'.").
format(key, val),
FITSFixedWarning)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
header and axes are mutually exclusive, alternative ways
to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : (int, int), optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
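        Examples
        --------
        A minimal sketch (``w`` is assumed to be a 2-D celestial `WCS` and
        ``hdr`` the header it was built from)::
            footprint = w.calc_footprint(header=hdr)
            print(footprint.shape)   # (4, 2)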
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn(
"Need a valid header in order to calculate footprint\n", AstropyUserWarning)
return None
else:
naxis1 = header.get('NAXIS1', None)
naxis2 = header.get('NAXIS2', None)
if naxis1 is None or naxis2 is None:
raise ValueError(
"Image size could not be determined.")
if center:
corners = np.array([[1, 1],
[1, naxis2],
[naxis1, naxis2],
[naxis1, 1]], dtype=np.float64)
else:
corners = np.array([[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5]], dtype=np.float64)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header['AXISCORR']
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == 'lookup':
del header[distortion]
assert isinstance(fobj, fits.HDUList), (
                        'An astropy.io.fits.HDUList '
'is required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + '.EXTVER'
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f'.AXIS.{i:d}'
if i == header[dp_axis_key]:
d_data = fobj['D2IMARR', d_extver].data
else:
d_data = (fobj['D2IMARR', d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj['D2IMARR', d_extver].header
d_crpix = (d_header.get('CRPIX1', 0.0), d_header.get('CRPIX2', 0.0))
d_crval = (d_header.get('CRVAL1', 0.0), d_header.get('CRVAL2', 0.0))
d_cdelt = (d_header.get('CDELT1', 1.0), d_header.get('CDELT2', 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix,
d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
for key in set(header):
if key.startswith(dp + '.'):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn(
"The use of ``AXISCORR`` for D2IM correction has been deprecated."
"`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write "
"out files without it.",
AstropyDeprecationWarning)
cpdis = [None, None]
crpix = [0., 0.]
crval = [0., 0.]
cdelt = [1., 1.]
try:
d2im_data = fobj[('D2IMARR', 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[('D2IMARR', 1)].header
naxis = d2im_hdr['NAXIS']
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get('CRPIX' + str(i), 0.0)
crval[i - 1] = d2im_hdr.get('CRVAL' + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get('CDELT' + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`~astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = 'D2IMDIS'
d_kw = 'D2IM'
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f'{dist}{num:d}'] = (
'LOOKUP', 'Detector to image correction type')
hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = (
len(det2im.data.shape), 'Number of independent variables in D2IM function')
for i in range(det2im.data.ndim):
jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th')
hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = (
i + 1, f'Axis number of the {jth} variable in a D2IM function')
image = fits.ImageHDU(det2im.data, name='D2IMARR')
header = image.header
header['CRPIX1'] = (det2im.crpix[0],
'Coordinate system reference pixel')
header['CRPIX2'] = (det2im.crpix[1],
'Coordinate system reference pixel')
header['CRVAL1'] = (det2im.crval[0],
'Coordinate system value at reference pixel')
header['CRVAL2'] = (det2im.crval[1],
'Coordinate system value at reference pixel')
header['CDELT1'] = (det2im.cdelt[0],
'Coordinate increment along axis')
header['CDELT2'] = (det2im.cdelt[1],
'Coordinate increment along axis')
image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER'])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == 'CPDIS':
d_kw = 'DP'
err_kw = 'CPERR'
else:
d_kw = 'DQ'
err_kw = 'CQERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == 'lookup':
if not isinstance(fobj, fits.HDUList):
raise ValueError('an astropy.io.fits.HDUList is '
'required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + '.EXTVER'
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f'.AXIS.{i:d}'
if i == header[dp_axis_key]:
d_data = fobj['WCSDVARR', d_extver].data
else:
d_data = (fobj['WCSDVARR', d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj['WCSDVARR', d_extver].header
d_crpix = (d_header.get('CRPIX1', 0.0),
d_header.get('CRPIX2', 0.0))
d_crval = (d_header.get('CRVAL1', 0.0),
d_header.get('CRVAL2', 0.0))
d_cdelt = (d_header.get('CDELT1', 1.0),
d_header.get('CDELT2', 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + '.'):
del header[key]
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist='CPDIS'):
"""
Write out `distortion paper`_ keywords to the given
`~astropy.io.fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == 'CPDIS':
d_kw = 'DP'
else:
d_kw = 'DQ'
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f'{dist}{num:d}'] = (
'LOOKUP', 'Prior distortion function type')
hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = (
len(cpdis.data.shape), f'Number of independent variables in {dist} function')
for i in range(cpdis.data.ndim):
jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th')
hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = (
i + 1,
f'Axis number of the {jth} variable in a {dist} function')
image = fits.ImageHDU(cpdis.data, name='WCSDVARR')
header = image.header
header['CRPIX1'] = (cpdis.crpix[0], 'Coordinate system reference pixel')
header['CRPIX2'] = (cpdis.crpix[1], 'Coordinate system reference pixel')
header['CRVAL1'] = (cpdis.crval[0], 'Coordinate system value at reference pixel')
header['CRVAL2'] = (cpdis.crval[1], 'Coordinate system value at reference pixel')
header['CDELT1'] = (cpdis.cdelt[0], 'Coordinate increment along axis')
header['CDELT2'] = (cpdis.cdelt[1], 'Coordinate increment along axis')
image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER'])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in set(m.group() for m in map(SIP_KW.match, list(header))
if m is not None):
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if "A_ORDER" in header and header['A_ORDER'] > 1:
if "B_ORDER" not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion")
m = int(header["A_ORDER"])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"A_{i}_{j}"
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header["B_ORDER"])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"B_{i}_{j}"
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header['A_ORDER']
del header['B_ORDER']
ctype = [header[f'CTYPE{nax}{wcskey}'] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith('-SIP') for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
""" # noqa: E501
log.info(message)
elif "B_ORDER" in header and header['B_ORDER'] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER " +
"keyword for SIP distortion")
else:
a = None
b = None
if "AP_ORDER" in header and header['AP_ORDER'] > 1:
if "BP_ORDER" not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion")
m = int(header["AP_ORDER"])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"AP_{i}_{j}"
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header["BP_ORDER"])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"BP_{i}_{j}"
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header['AP_ORDER']
del header['BP_ORDER']
elif "BP_ORDER" in header and header['BP_ORDER'] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion")
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
raise ValueError(
"Header has SIP keywords without CRPIX keywords")
crpix1 = header.get(f"CRPIX1{wcskey}")
crpix2 = header.get(f"CRPIX2{wcskey}")
return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = 'sky to detector' if name[-1] == 'P' else 'detector to sky'
comment = ('SIP polynomial order, axis {:d}, {:s}'
.format(ord(name[0]) - ord('A'), trdir))
keywords[f'{name}_ORDER'] = size - 1, comment
comment = 'SIP distortion coefficient'
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[
f'{name}_{i:d}_{j:d}'] = a[i, j], comment
write_array('A', self.sip.a)
write_array('B', self.sip.b)
write_array('AP', self.sip.ap)
write_array('BP', self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial " +
"axes, therefore (ra, dec) data can not be used as input")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be " +
"used as input")
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
if any([x.size == 0 for x in axes]):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other")
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == 'output':
output = self._normalize_sky(output)
return (output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape))
return [output[:, i].reshape(axes[0].shape)
for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
"of shape (N, {})".format(self.naxis))
if 0 in xy.shape:
return xy
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == 'output':
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
"(coords[N][{}], origin)".format(self.naxis))
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be " +
"a 1-D array for each axis, followed by an origin.")
return _return_list_of_arrays(axes, origin)
raise TypeError(
"WCS projection has {0} dimensions, so expected 2 (an Nx{0} array "
"and the origin argument) or {1} arguments (the position in each "
"dimension, and the origin argument). Instead, {2} arguments were "
"given.".format(
self.naxis, self.naxis + 1, len(args)))
def all_pix2world(self, *args, **kwargs):
return self._array_converter(
self._all_pix2world, 'output', *args, **kwargs)
all_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
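        Examples
        --------
        A short sketch (``w`` is assumed to be a 2-D celestial `WCS`)::
            ra, dec = w.all_pix2world([100., 200.], [50., 60.], 1)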
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('sky coordinates, in degrees', 8))
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)['world'],
'output', *args, **kwargs)
wcs_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('world coordinates, in degrees', 8))
def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive,
detect_divergence, quiet):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
        # (see equations 2 and 3 in the SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
# a different phase than user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
# (prime) for the pixels coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
        # `pix2foc` is the identity transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking if iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge for which we
# want to stop iterations). In my tests, the adaptive version
# of the algorithm is about 50% slower than non-adaptive
# version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` can be a `MxN` array
# where `M` is the number of coordinate axes in WCS and `N`
# is the number of points to be converted simultaneously to
# image coordinates.
#
#
# ### IMPORTANT NOTE: ###
#
# If, in the future releases of the `~astropy.wcs`,
# `pix2foc` will not apply all the required distortion
# corrections then in the code below, calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = (dnnew < dnprev[ind])
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world), axis=1)))
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
"After {:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
return pix
@deprecated_renamed_argument('accuracy', 'tolerance', '4.3')
def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
detect_divergence=True, quiet=False, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs:
self._all_world2pix(
*args, tolerance=tolerance, maxiter=maxiter,
adaptive=adaptive, detect_divergence=detect_divergence,
quiet=quiet),
'input', *args, **kwargs
)
all_world2pix.__doc__ = """
        all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
tolerance : float, optional (default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
            within this many pixels of the current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
                iterates over *all* input points *regardless of convergence* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but there are only a few of input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
                shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
                for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
            the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
            ``Inf`` values in the return results (in addition to
performance penalties). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
                ``detect_divergence`` to `True` will incur about a 5-20%
                performance penalty, with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
        Using the method of fixed-point iterations, we
iterate starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
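    # A minimal, illustrative sketch of the iteration controls documented
    # above (``w`` is assumed to be an existing WCS instance and the input
    # coordinates are made-up values):
    #
    #     x, y = w.all_world2pix([5.52], [-72.05], 1,
    #                            tolerance=1e-6, maxiter=30,
    #                            adaptive=False, detect_divergence=True,
    #                            quiet=False)
    #
    # With ``quiet=False`` a NoConvergence exception is raised if the
    # requested tolerance is not reached within ``maxiter`` iterations.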
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'],
'input', *args, **kwargs)
wcs_world2pix.__doc__ = """
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = """
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = """
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
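    # The methods above expose the individual distortion steps described in
    # their docstrings: ``det2im`` applies the detector-to-image table lookup,
    # ``sip_pix2foc``/``sip_foc2pix`` apply only the SIP polynomial,
    # ``p4_pix2foc`` applies only the distortion paper table lookup, and
    # ``pix2foc`` combines SIP with the table lookup.  An illustrative sketch
    # (``w`` is assumed to be an existing WCS instance):
    #
    #     foc_sip_only = w.sip_pix2foc([[100.0, 200.0]], 1)
    #     foc_combined = w.pix2foc([[100.0, 200.0]], 1)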
def proj_plane_pixel_scales(self):
"""
Calculate pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This method is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
scale : list of `~astropy.units.Quantity`
A vector of projection plane increments corresponding to each
pixel side (axis).
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
""" # noqa: E501
from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import
values = proj_plane_pixel_scales(self)
units = [u.Unit(x) for x in self.wcs.cunit]
return [value * unit for (value, unit) in zip(values, units)] # Can have different units
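    # Illustrative sketch (``w`` is assumed to be a 2-D celestial WCS; the
    # target unit is only an example):
    #
    #     scales = w.proj_plane_pixel_scales()
    #     scales_arcsec = [s.to(u.arcsec) for s in scales]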
def proj_plane_pixel_area(self):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
area : `~astropy.units.Quantity`
Area (in the projection plane) of the pixel at ``CRPIX`` location.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
""" # noqa: E501
from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import
value = proj_plane_pixel_area(self)
unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only
return value * unit
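    # Illustrative sketch: as noted in the docstring, the square root of the
    # returned area can serve as an equivalent square-pixel scale
    # (``w`` is assumed to be a 2-D celestial WCS):
    #
    #     area = w.proj_plane_pixel_area()
    #     equivalent_scale = area ** 0.5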
def to_fits(self, relax=False, key=None):
"""
Generate an `~astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
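    # Illustrative sketch (the output file name is hypothetical):
    #
    #     w.to_fits(relax=True).writeto('wcs_only.fits', overwrite=True)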
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14 # Defined by C-ext # noqa: F821
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext # noqa: F821
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
# Check if we can handle TPD distortion correctly
if _WCS_TPD_WARN_LT71:
for kw, val in header.items():
if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD':
warnings.warn(
f"WCS contains a TPD distortion model in {kw}. WCSLIB "
f"{_wcs.__version__} is writing this in a format incompatible with "
f"current versions - please update to 7.4 or use the bundled WCSLIB.",
AstropyWarning)
elif _WCS_TPD_WARN_LT74:
for kw, val in header.items():
if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD':
warnings.warn(
f"WCS contains a TPD distortion model in {kw}, which requires WCSLIB "
f"7.4 or later to store in a FITS header (having {_wcs.__version__}).",
AstropyWarning)
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None:
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded: {} "
"Use the ``relax`` kwarg to control this.".format(
', '.join(missing_keys)),
AstropyWarning)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
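    # Illustrative sketch of the ``relax`` behaviour described above: passing
    # an explicit value avoids the "non-standard WCS keywords were excluded"
    # warning that is emitted when ``relax`` is left at ``None``:
    #
    #     hdr_safe = w.to_header(relax=False)  # standard keywords only
    #     hdr_full = w.to_header(relax=True)   # include informal extensions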
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if current WCS is already distortion-corrected (eg, drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis+1):
# strip() must be called here to cover the case of alt key= " "
kw = f'CTYPE{i}{self.wcs.alt}'.strip()
if kw in header:
if add_sip:
val = header[kw].strip("-SIP") + "-SIP"
else:
val = header[kw].strip("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(self, filename='footprint.reg', color='green',
width=2, coordsys=None):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = ('# Region file format: DS9 version 4.0 \n'
'# global color=green font="helvetica 12 bold '
'select=1 highlite=1 edit=1 move=1 delete=1 '
'include=1 fixed=0 source\n')
coordsys = coordsys or self.wcs.radesys
if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
'AMPLIFIER', 'DETECTOR'):
raise ValueError("Coordinate system '{}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
.format(coordsys))
with open(filename, mode='w') as f:
f.write(comments)
f.write(f'{coordsys}\n')
f.write('polygon(')
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=',')
f.write(f') # color={color}, width={width:d} \n')
def _get_naxis(self, header=None):
_naxis = []
if (header is not None and
not isinstance(header, (str, bytes))):
for naxis in itertools.count(1):
try:
_naxis.append(header[f'NAXIS{naxis}'])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
'''
Return a short description. Simply porting the behavior from
the `printwcs()` method.
'''
description = ["WCS Keywords\n",
f"Number of WCS axes: {self.naxis!r}"]
sfmt = ' : ' + "".join(["{"+f"{i}"+"!r} " for i in range(self.naxis)])
keywords = ['CTYPE', 'CRVAL', 'CRPIX']
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword+sfmt.format(*value))
if hasattr(self.wcs, 'pc'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += ''.join(['PC', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = 'CDELT' + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, 'cd'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += "".join(['CD', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append(f"NAXIS : {' '.join(map(str, self._naxis))}")
return '\n'.join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dict
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError(
"This WCS object does not have a wcsprm object.")
coordinate_type_map = {
0: None,
1: 'stokes',
2: 'celestial',
3: 'spectral'}
scale_map = {
0: 'linear',
1: 'quantized',
2: 'non-linear celestial',
3: 'non-linear spectral',
4: 'logarithmic',
5: 'tabular'}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult['coordinate_type'] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult['scale'] = scale_map[scale]
group = (axis_type // 10) % 10
subresult['group'] = group
number = axis_type % 10
subresult['number'] = number
result.append(subresult)
return result
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
dct = self.__dict__.copy()
dct['_alt_wcskey'] = self.wcs.alt
return (__WCS_unpickle__,
(self.__class__, dct, buffer.getvalue(),))
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS with naxis to be chopped to naxis-1
dropax : int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with one axis fewer
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i+1 for i in inds])
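    # Illustrative sketch (``cube_wcs`` is a hypothetical 3-axis WCS such as
    # the RA/DEC/VLSR example in the comment above); dropping the last axis
    # keeps the two celestial axes:
    #
    #     celestial_only = cube_wcs.dropaxis(2)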
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to have its axes swapped
ax0 : int
ax1 : int
The indices of the WCS to be swapped, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with the same number of axes,
but two swapped
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i+1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES, WCSSUB_TIME]) # Defined by C-ext # noqa: F821 E501
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
            A negative ``step`` (the third argument to a slice, i.e. axis
            reversal) is not presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new resampled WCS axis
"""
if hasattr(view, '__len__') and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, '__len__'): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not "
"implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = ((crpix - iview.start - 1.)/iview.step
+ 0.5 + 1./iview.step/2.)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if 'indices must be integers' not in str(exc):
raise
warnings.warn("NAXIS{} attribute is not updated because at "
"least one index ('{}') is no integer."
"".format(wcs_index, iview), AstropyUserWarning)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp,
sip_crpix)
return wcs_new
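    # Illustrative sketch (``w`` and ``data`` are assumed to be an existing
    # 2-D WCS and its matching image array): slicing the WCS with the same
    # view applied to the data keeps the two consistent, and ``__getitem__``
    # below simply forwards to ``slice``:
    #
    #     cutout_data = data[50:150, 100:300]
    #     cutout_wcs = w[50:150, 100:300]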
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
"""
World names for each coordinate axis
Returns
-------
list of str
A list of names along each axis.
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split('-')[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included
"""
return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext # noqa: F821
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def spectral(self):
"""
A copy of the current WCS with only the spectral axes included
"""
return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext # noqa: F821
@property
def is_spectral(self):
return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
try:
return self.wcs.spec >= 0
except InconsistentAxisTypesError:
return False
@property
def temporal(self):
"""
A copy of the current WCS with only the time axes included
"""
if not _WCSSUB_TIME_SUPPORT:
raise NotImplementedError(
"Support for 'temporal' axis requires WCSLIB version 7.8 or "
f"greater but linked WCSLIB version is {_wcs.__version__}"
)
return self.sub([WCSSUB_TIME]) # Defined by C-ext # noqa: F821
@property
def is_temporal(self):
return self.has_temporal and self.naxis == 1
@property
def has_temporal(self):
return any(t // 1000 == 4 for t in self.wcs.axis_types)
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
return (self.sip is not None or
self.cpdis1 is not None or self.cpdis2 is not None or
self.det2im1 is not None and self.det2im2 is not None)
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', 'cdelt will be ignored since cd is present', RuntimeWarning)
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.dot(cdelt, pc)
return pccd
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
            The coordinate to check for inclusion in the WCS footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
naxis = dct.pop('naxis', None)
if naxis:
hdulist[0].header['naxis'] = naxis
naxes = dct.pop('_naxis', [])
for k, na in enumerate(naxes):
hdulist[0].header[f'naxis{k + 1:d}'] = na
kwargs = dct.pop('_init_kwargs', {})
self.__dict__.update(dct)
wcskey = dct.pop('_alt_wcskey', ' ')
WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs)
self.pixel_bounds = dct.get('_pixel_bounds', None)
return self
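# Illustrative sketch of the pickle round trip implemented by ``__reduce__``
# and ``__WCS_unpickle__`` above (``w`` is assumed to be an existing WCS
# instance):
#
#     import pickle
#     w_copy = pickle.loads(pickle.dumps(w))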
def find_all_wcs(header, relax=True, keysel=None, fix=True,
translate_units='',
_do_set=True):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or `~astropy.io.fits.Header` object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
keysel : sequence of str, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS`
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError(
"header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
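# Illustrative sketch (the file name is hypothetical):
#
#     header = fits.getheader('image.fits')
#     wcs_list = find_all_wcs(header, relax=True)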
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str or file-like or `~astropy.io.fits.HDUList`
The FITS file to validate.
Returns
-------
results : list subclass instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [f" WCS key '{self._key or ' '}':"]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = ' - '
else:
initial_indent = ' '
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=' '))
else:
result.append(" No issues.")
return '\n'.join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = f' ({self._hdu_name})'
else:
hdu_name = ''
result = [f'HDU {self._hdu_index}{hdu_name}:']
for wcs in self:
result.append(repr(wcs))
return '\n'.join(result)
return ''
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return '\n\n'.join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject,
fix=False, _do_set=False)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter(
"always", FITSFixedWarning, append=True)
try:
WCS(hdu.header,
key=wcs.wcs.alt or ' ',
relax=_wcs.WCSHDR_reject,
fix=True, _do_set=False)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
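# Illustrative sketch (the file name is hypothetical); the returned nested
# list subclasses pretty-print the report when printed:
#
#     print(validate('image.fits'))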
| {
"content_hash": "7043804f861a927a0ae4590aeb856263",
"timestamp": "",
"source": "github",
"line_count": 3510,
"max_line_length": 153,
"avg_line_length": 37.946438746438744,
"alnum_prop": 0.5385008108595111,
"repo_name": "saimn/astropy",
"id": "c6fa9ce5d136d0056edc0c403af53e609248eaf8",
"size": "134251",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/wcs/wcs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11034753"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78631"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52457"
},
{
"name": "Python",
"bytes": "12214998"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
from django.conf import settings
from openstack_dashboard.api import base
from openstack_dashboard.api import neutron
from gbpclient.v2_0 import client as gbp_client
LOG = logging.getLogger(__name__)
def gbpclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
LOG.debug('gbpclient connection created using token "%s" and url "%s"'
% (request.user.token.id, base.url_for(request, 'network')))
LOG.debug('user_id=%(user)s, tenant_id=%(tenant)s' %
{'user': request.user.id, 'tenant': request.user.tenant_id})
c = gbp_client.Client(token=request.user.token.id,
auth_url=base.url_for(request, 'identity'),
endpoint_url=base.url_for(request, 'network'),
insecure=insecure, ca_cert=cacert)
return c
class PT(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron endpoint group."""
def get_dict(self):
pt_dict = self._apidict
pt_dict['ep_id'] = pt_dict['id']
return pt_dict
class PTG(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron endpoint group."""
def get_dict(self):
epg_dict = self._apidict
epg_dict['epg_id'] = epg_dict['id']
return epg_dict
class Contract(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron policy_rule_set."""
def get_dict(self):
policy_rule_set_dict = self._apidict
policy_rule_set_dict['policy_rule_set_id'] = policy_rule_set_dict['id']
return policy_rule_set_dict
class PolicyRule(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron policy rule."""
def get_dict(self):
policyrule_dict = self._apidict
policyrule_dict['policyrule_dict_id'] = policyrule_dict['id']
return policyrule_dict
class PolicyClassifier(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron classifier."""
def get_dict(self):
classifier_dict = self._apidict
classifier_dict['classifier_id'] = classifier_dict['id']
return classifier_dict
class PolicyAction(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron action."""
def get_dict(self):
action_dict = self._apidict
action_dict['action_id'] = action_dict['id']
return action_dict
class L2Policy(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron l2policy."""
def get_dict(self):
policy_dict = self._apidict
policy_dict['policy_id'] = policy_dict['id']
return policy_dict
class NetworkServicePolicy(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron network service policy."""
def get_dict(self):
policy_dict = self._apidict
return policy_dict
class ServiceChainSpec(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron service chain spec."""
def get_dict(self):
sc_spec_dict = self._apidict
return sc_spec_dict
class ServiceChainNode(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron service chain spec."""
def get_dict(self):
sc_node_dict = self._apidict
return sc_node_dict
class ServiceChainInstance(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron service chain spec."""
def get_dict(self):
sc_instance_dict = self._apidict
return sc_instance_dict
def policy_target_create(request, **kwargs):
body = {'policy_target_group': kwargs}
policy_target = gbpclient(request).create_policy_target_group(
        body).get('policy_target_group')
return PTG(policy_target)
def pt_create(request, **kwargs):
body = {'policy_target': kwargs}
pt = gbpclient(request).create_policy_target(body).get('policy_target')
    return PT(pt)
def pt_list(request, **kwargs):
policy_targets = gbpclient(request).list_policy_targets(
**kwargs).get('policy_targets')
return [PT(pt) for pt in policy_targets]
def pt_delete(request, pt_id):
gbpclient(request).delete_policy_target(pt_id)
def policy_target_list(request, **kwargs):
policy_targets = gbpclient(request).list_policy_target_groups(
**kwargs).get('policy_target_groups')
return [PTG(policy_target) for policy_target in policy_targets]
def policy_target_get(request, policy_target_id):
policy_target = gbpclient(request).show_policy_target_group(
policy_target_id).get('policy_target_group')
return PTG(policy_target)
def policy_target_delete(request, policy_target_id):
gbpclient(request).delete_policy_target_group(policy_target_id)
def policy_target_update(request, policy_target_id, **kwargs):
body = {'policy_target_group': kwargs}
policy_target = gbpclient(request).update_policy_target_group(
policy_target_id, body).get('policy_target_group')
return PTG(policy_target)
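# Illustrative sketch of the create/get/update/delete helpers in this module,
# as they might be called from a Django view (field values are hypothetical):
#
#     ptg = policy_target_create(request, name='web-tier',
#                                description='web servers')
#     policy_target_update(request, ptg.id, description='updated')
#     policy_target_delete(request, ptg.id)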
def policy_rule_set_create(request, **kwargs):
body = {'policy_rule_set': kwargs}
policy_rule_set = gbpclient(request).create_policy_rule_set(
body).get('policy_rule_set')
return Contract(policy_rule_set)
def policy_rule_set_list(request, **kwargs):
policy_rule_sets = gbpclient(request).list_policy_rule_sets(
**kwargs).get('policy_rule_sets')
return [Contract(policy_rule_set) for policy_rule_set in policy_rule_sets]
def policy_rule_set_get(request, policy_rule_set_id):
policy_rule_set = gbpclient(request).show_policy_rule_set(
policy_rule_set_id).get('policy_rule_set')
return Contract(policy_rule_set)
def policy_rule_set_delete(request, policy_rule_set_id):
gbpclient(request).delete_policy_rule_set(policy_rule_set_id)
def policy_rule_set_update(request, policy_rule_set_id, **kwargs):
body = {'policy_rule_set': kwargs}
policy_rule_set = gbpclient(request).update_policy_rule_set(
policy_rule_set_id, body).get('policy_rule_set')
return Contract(policy_rule_set)
def policyrule_create(request, **kwargs):
body = {'policy_rule': kwargs}
policy_rule = gbpclient(request).create_policy_rule(
body).get('policy_rule')
return PolicyRule(policy_rule)
def policyrule_update(request, prid, **kwargs):
body = {'policy_rule': kwargs}
policy_rule = gbpclient(request).update_policy_rule(prid,
body).get('policy_rule')
return PolicyRule(policy_rule)
def policyrule_list(request, **kwargs):
policyrules = gbpclient(request).list_policy_rules(
**kwargs).get('policy_rules')
return [PolicyRule(pr) for pr in policyrules]
def policyclassifier_create(request, **kwargs):
body = {'policy_classifier': kwargs}
classifier = gbpclient(request).create_policy_classifier(
body).get('policy_classifier')
return PolicyClassifier(classifier)
def policyclassifier_list(request, **kwargs):
classifiers = gbpclient(request).list_policy_classifiers(
**kwargs).get('policy_classifiers')
return [PolicyClassifier(pc) for pc in classifiers]
def policyaction_create(request, **kwargs):
body = {'policy_action': kwargs}
action = gbpclient(request).create_policy_action(
body).get('policy_action')
return PolicyAction(action)
def policyaction_list(request, **kwargs):
actions = gbpclient(request).list_policy_actions(
**kwargs).get('policy_actions')
return [PolicyAction(pa) for pa in actions]
def policyaction_delete(request, pa_id):
gbpclient(request).delete_policy_action(pa_id)
def policyaction_get(request, pa_id):
policyaction = gbpclient(request).show_policy_action(
pa_id).get('policy_action')
return PolicyAction(policyaction)
def policyaction_update(request, pc_id, **kwargs):
body = {'policy_action': kwargs}
    action = gbpclient(request).update_policy_action(
        pc_id, body).get('policy_action')
    return PolicyAction(action)
def policyrule_get(request, pr_id):
policyrule = gbpclient(request).show_policy_rule(
pr_id).get('policy_rule')
return PolicyRule(policyrule)
def policyrule_delete(request, pr_id):
return gbpclient(request).delete_policy_rule(pr_id)
def policyclassifier_get(request, pc_id):
policyclassifier = gbpclient(request).show_policy_classifier(
pc_id).get('policy_classifier')
return PolicyClassifier(policyclassifier)
def policyclassifier_delete(request, pc_id):
gbpclient(request).delete_policy_classifier(pc_id)
def policyclassifier_update(request, pc_id, **kwargs):
body = {'policy_classifier': kwargs}
classifier = gbpclient(request).update_policy_classifier(pc_id,
body).get('policy_classifier')
return PolicyClassifier(classifier)
def l3policy_list(request, **kwargs):
policies = gbpclient(request).list_l3_policies(**kwargs).get('l3_policies')
return [L2Policy(item) for item in policies]
def l2policy_list(request, **kwargs):
policies = gbpclient(request).list_l2_policies(**kwargs).get('l2_policies')
return [L2Policy(item) for item in policies]
def networkservicepolicy_list(request, **kwargs):
policies = gbpclient(request).list_network_service_policies(
**kwargs).get('network_service_policies')
return [NetworkServicePolicy(item) for item in policies]
def create_networkservice_policy(request, **kwargs):
body = {'network_service_policy': kwargs}
spolicy = gbpclient(request).create_network_service_policy(
body).get('network_service_policy')
return NetworkServicePolicy(spolicy)
def update_networkservice_policy(request, policy_id, **kwargs):
body = {'network_service_policy': kwargs}
spolicy = gbpclient(request).update_network_service_policy(
policy_id, body).get('network_service_policy')
return NetworkServicePolicy(spolicy)
def delete_networkservice_policy(request, policy_id, **kwargs):
gbpclient(request).delete_network_service_policy(policy_id)
def get_networkservice_policy(request, policy_id):
spolicy = gbpclient(request).show_network_service_policy(
policy_id).get('network_service_policy')
return NetworkServicePolicy(spolicy)
def l3policy_get(request, pc_id, **kwargs):
return gbpclient(request).show_l3_policy(pc_id).get('l3_policy')
def l3policy_create(request, **kwargs):
body = {'l3_policy': kwargs}
return gbpclient(request).create_l3_policy(body).get('l3_policy')
def l3policy_delete(request, policy_id):
gbpclient(request).delete_l3_policy(policy_id)
def l2policy_get(request, pc_id, **kwargs):
return L2Policy(gbpclient(request).show_l2_policy(pc_id).get('l2_policy'))
def l2policy_create(request, **kwargs):
body = {'l2_policy': kwargs}
policy = gbpclient(request).create_l2_policy(body).get('l2_policy')
return L2Policy(policy)
def l2policy_update(request, pc_id, **kwargs):
body = {'l2_policy': kwargs}
policy = gbpclient(request).update_l2_policy(pc_id, body).get('l2_policy')
return L2Policy(policy)
def l2policy_delete(request, policy_id):
gbpclient(request).delete_l2_policy(policy_id)
def servicechainnode_list(request, **kwargs):
sc_nodes = gbpclient(request).list_servicechain_nodes(
**kwargs).get('servicechain_nodes')
return [ServiceChainNode(item) for item in sc_nodes]
def servicechainspec_list(request, **kwargs):
sc_specs = gbpclient(request).list_servicechain_specs(
**kwargs).get('servicechain_specs')
return [ServiceChainSpec(item) for item in sc_specs]
def servicechaininstance_list(request, **kwargs):
sc_instances = gbpclient(request).list_servicechain_instances(
**kwargs).get('servicechain_instances')
return [ServiceChainInstance(item) for item in sc_instances]
def get_servicechain_node(request, scnode_id):
scnode = gbpclient(request).show_servicechain_node(
scnode_id).get('servicechain_node')
return ServiceChainNode(scnode)
def create_servicechain_node(request, **kwargs):
body = {'servicechain_node': kwargs}
sc_node = gbpclient(request).create_servicechain_node(
body).get('servicechain_node')
return ServiceChainNode(sc_node)
def update_servicechain_node(request, scnode_id, **kwargs):
body = {'servicechain_node': kwargs}
sc_node = gbpclient(request).update_servicechain_node(
scnode_id, body).get('servicechain_node')
return ServiceChainNode(sc_node)
def delete_servicechain_node(request, scnode_id):
gbpclient(request).delete_servicechain_node(scnode_id)
def get_servicechain_spec(request, scspec_id):
sc_spec = gbpclient(request).show_servicechain_spec(
scspec_id).get('servicechain_spec')
return ServiceChainSpec(sc_spec)
def create_servicechain_spec(request, **kwargs):
body = {'servicechain_spec': kwargs}
sc_spec = gbpclient(request).create_servicechain_spec(
body).get('servicechain_spec')
return ServiceChainSpec(sc_spec)
def update_servicechain_spec(request, scspec_id, **kwargs):
body = {'servicechain_spec': kwargs}
sc_spec = gbpclient(request).update_servicechain_spec(
scspec_id, body).get('servicechain_spec')
return ServiceChainSpec(sc_spec)
def delete_servicechain_spec(request, scspec_id):
gbpclient(request).delete_servicechain_spec(scspec_id)
def get_servicechain_instance(request, scinstance_id):
sc_instance = gbpclient(request).show_servicechain_instance(
scinstance_id).get('servicechain_instance')
return ServiceChainInstance(sc_instance)
def create_servicechain_instance(request, **kwargs):
body = {'servicechain_instance': kwargs}
sc_instance = gbpclient(request).create_servicechain_instance(
body).get('servicechain_instance')
return ServiceChainInstance(sc_instance)
def update_servicechain_instance(request, scinstance_id, **kwargs):
body = {'servicechain_instance': kwargs}
sc_instance = gbpclient(request).update_servicechain_instance(
scinstance_id, body).get('servicechain_instance')
return ServiceChainInstance(sc_instance)
def delete_servicechain_instance(request, scinstance_id):
gbpclient(request).delete_servicechain_instance(scinstance_id)
| {
"content_hash": "5b35d318716554c60810ce218a8ae78f",
"timestamp": "",
"source": "github",
"line_count": 461,
"max_line_length": 79,
"avg_line_length": 30.77006507592191,
"alnum_prop": 0.6979203383856186,
"repo_name": "promptworks/group-based-policy-ui",
"id": "ceee0fa09467fa9641df265cc142b8e96c7bd7d5",
"size": "14731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gbpui/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32887"
},
{
"name": "HTML",
"bytes": "46737"
},
{
"name": "JavaScript",
"bytes": "3971"
},
{
"name": "Python",
"bytes": "208580"
},
{
"name": "Shell",
"bytes": "16643"
}
],
"symlink_target": ""
} |
from django.forms import widgets as django_widgets
from django.template.loader import render_to_string
from leaflet.forms.widgets import LeafletWidget
from .helpers import wkt_to_geom
from .settings import API_SRID
class MapWidget(LeafletWidget):
geometry_field_class = 'MapEntity.GeometryField'
def render(self, name, value, attrs=None, renderer=None):
attrs = attrs or {}
attrs.update(geometry_field_class=self.geometry_field_class)
return super().render(name, value, attrs)
class HiddenGeometryWidget(django_widgets.HiddenInput):
def value_from_datadict(self, data, files, name):
"""
From WKT to Geometry (TODO: should be done in Field clean())
"""
wkt = super().value_from_datadict(data, files, name)
return None if not wkt else wkt_to_geom(wkt, silent=True)
def format_value(self, value):
"""
Before serialization, reprojects to API_SRID
"""
if value and not isinstance(value, str):
value.transform(API_SRID)
return value
class SelectMultipleWithPop(django_widgets.SelectMultiple):
def __init__(self, *args, **kwargs):
self.add_url = kwargs.pop('add_url')
super().__init__(*args, **kwargs)
def render(self, name, *args, **kwargs):
html = super().render(name, *args, **kwargs)
context = {'field': name, 'add_url': self.add_url}
popupplus = render_to_string("mapentity/popupplus.html", context)
return html + popupplus
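# Illustrative sketch of wiring the widget into a Django form (the model,
# queryset and URL below are hypothetical):
#
#     class ThemeForm(forms.ModelForm):
#         themes = forms.ModelMultipleChoiceField(
#             queryset=Theme.objects.all(),
#             widget=SelectMultipleWithPop(add_url='/theme/add/'))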
| {
"content_hash": "0c34e97067318a7892b43dafc53a5c14",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 73,
"avg_line_length": 33.8,
"alnum_prop": 0.6561472715318869,
"repo_name": "makinacorpus/django-mapentity",
"id": "19bf3d090fae8eacb5104d1a34ee518e88d969f3",
"size": "1521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapentity/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43931"
},
{
"name": "Dockerfile",
"bytes": "743"
},
{
"name": "HTML",
"bytes": "97794"
},
{
"name": "JavaScript",
"bytes": "451716"
},
{
"name": "Python",
"bytes": "261566"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import curses
import sys
import signal
import processInput
import usageStrings
import output
import logger
from charCodeMapping import CODE_TO_CHAR
from colorPrinter import ColorPrinter
def signal_handler(signal, frame):
# from http://stackoverflow.com/a/1112350/948126
# Lets just quit rather than signal.SIGINT printing the stack
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
CHROME_MIN_X = 5
CHROME_MIN_Y = 0
SELECT_MODE = 'SELECT'
COMMAND_MODE = 'COMMAND_MODE'
SHORT_NAV_USAGE = '[f|A] selection, [down|j|up|k|space|b] navigation, [enter] open, [c] command mode'
SHORT_COMMAND_USAGE = 'command examples: | git add | git checkout HEAD~1 -- | mv $F ../here/ |'
SHORT_COMMAND_PROMPT = 'Type a command below! Files will be appended or replace $F'
SHORT_COMMAND_PROMPT2 = 'Enter a blank line to go back to the selection process'
SHORT_FILES_HEADER = 'Files you have selected:'
INVISIBLE_CURSOR = 0
BLOCK_CURSOR = 2
class HelperChrome(object):
def __init__(self, printer, screenControl):
self.printer = printer
self.screenControl = screenControl
self.WIDTH = 50
if self.getIsSidebarMode():
logger.addEvent('init_wide_mode')
else:
logger.addEvent('init_narrow_mode')
def output(self, mode):
self.mode = mode
for func in [self.outputSide, self.outputBottom, self.toggleCursor]:
try:
func()
except curses.error:
pass
def toggleCursor(self):
if self.mode == SELECT_MODE:
curses.curs_set(INVISIBLE_CURSOR)
else:
curses.curs_set(BLOCK_CURSOR)
def reduceMaxY(self, maxy):
if self.getIsSidebarMode():
return maxy
return maxy - 4
def reduceMaxX(self, maxx):
if not self.getIsSidebarMode():
return maxx
return maxx - self.WIDTH
def getMinX(self):
if self.mode == COMMAND_MODE:
return 0
return self.screenControl.getChromeBoundaries()[0]
def getMinY(self):
return self.screenControl.getChromeBoundaries()[1]
def getIsSidebarMode(self):
(maxy, maxx) = self.screenControl.getScreenDimensions()
return maxx > 200
def outputSide(self):
if not self.getIsSidebarMode():
return
(maxy, maxx) = self.screenControl.getScreenDimensions()
borderX = maxx - self.WIDTH
if (self.mode == COMMAND_MODE):
borderX = len(SHORT_COMMAND_PROMPT) + 20
usageLines = usageStrings.USAGE_PAGE.split('\n')
if self.mode == COMMAND_MODE:
usageLines = usageStrings.USAGE_COMMAND.split('\n')
for index, usageLine in enumerate(usageLines):
self.printer.addstr(self.getMinY() + index, borderX + 2, usageLine)
for y in range(self.getMinY(), maxy):
self.printer.addstr(y, borderX, '|')
def outputBottom(self):
if self.getIsSidebarMode():
return
(maxy, maxx) = self.screenControl.getScreenDimensions()
borderY = maxy - 2
# first output text since we might throw an exception during border
usageStr = SHORT_NAV_USAGE if self.mode == SELECT_MODE else SHORT_COMMAND_USAGE
borderStr = '_' * (maxx - self.getMinX() - 0)
self.printer.addstr(borderY, self.getMinX(), borderStr)
self.printer.addstr(borderY + 1, self.getMinX(), usageStr)
class ScrollBar(object):
def __init__(self, printer, lines, screenControl):
self.printer = printer
self.screenControl = screenControl
self.numLines = len(lines)
self.boxStartFraction = 0.0
self.boxStopFraction = 0.0
self.calcBoxFractions()
# see if we are activated
self.activated = True
(maxy, maxx) = self.screenControl.getScreenDimensions()
if (self.numLines < maxy):
self.activated = False
logger.addEvent('no_scrollbar')
else:
logger.addEvent('needed_scrollbar')
def getIsActivated(self):
return self.activated
def calcBoxFractions(self):
# what we can see is basically the fraction of our screen over
# total num lines
(maxy, maxx) = self.screenControl.getScreenDimensions()
fracDisplayed = min(1.0, (maxy / float(self.numLines)))
self.boxStartFraction = -self.screenControl.getScrollOffset() / float(
self.numLines)
self.boxStopFraction = self.boxStartFraction + fracDisplayed
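        # Worked example (hypothetical numbers): with 200 matched lines, a 50-row
        # screen and a scroll offset of -75, fracDisplayed is 0.25, boxStartFraction
        # is 75/200 = 0.375 and boxStopFraction is 0.625, so the scrollbar box
        # covers the middle quarter of the track.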
def output(self):
if not self.activated:
return
for func in [self.outputCaps, self.outputBase, self.outputBox,
self.outputBorder]:
try:
func()
except curses.error:
pass
def getMinY(self):
return self.screenControl.getChromeBoundaries()[1] + 1
def getX(self):
return 0
def outputBorder(self):
x = self.getX() + 4
(maxy, maxx) = self.screenControl.getScreenDimensions()
for y in range(0, maxy):
self.printer.addstr(y, x, ' ')
def outputBox(self):
(maxy, maxx) = self.screenControl.getScreenDimensions()
topY = maxy - 2
minY = self.getMinY()
diff = topY - minY
x = self.getX()
boxStartY = int(diff * self.boxStartFraction) + minY
boxStopY = int(diff * self.boxStopFraction) + minY
self.printer.addstr(boxStartY, x, '/-\\')
for y in range(boxStartY + 1, boxStopY):
self.printer.addstr(y, x, '|-|')
        self.printer.addstr(boxStopY, x, '\\-/')
def outputCaps(self):
x = self.getX()
(maxy, maxx) = self.screenControl.getScreenDimensions()
for y in [self.getMinY() - 1, maxy - 1]:
self.printer.addstr(y, x, '===')
def outputBase(self):
x = self.getX()
(maxy, maxx) = self.screenControl.getScreenDimensions()
for y in range(self.getMinY(), maxy - 1):
self.printer.addstr(y, x, ' . ')
class Controller(object):
def __init__(self, flags, stdscr, lineObjs, cursesAPI):
self.stdscr = stdscr
self.cursesAPI = cursesAPI
self.cursesAPI.useDefaultColors()
self.colorPrinter = ColorPrinter(self.stdscr, cursesAPI)
self.flags = flags
self.lineObjs = lineObjs
self.hoverIndex = 0
self.scrollOffset = 0
self.scrollBar = ScrollBar(self.colorPrinter, lineObjs, self)
self.helperChrome = HelperChrome(self.colorPrinter, self)
(self.oldmaxy, self.oldmaxx) = self.getScreenDimensions()
self.mode = SELECT_MODE
self.simpleLines = []
self.lineMatches = []
# lets loop through and split
for lineObj in self.lineObjs.values():
lineObj.controller = self
if (lineObj.isSimple()):
self.simpleLines.append(lineObj)
else:
self.lineMatches.append(lineObj)
self.numLines = len(lineObjs.keys())
self.numMatches = len(self.lineMatches)
self.setHover(self.hoverIndex, True)
# the scroll offset might not start off
# at 0 if our first real match is WAY
# down the screen -- so lets init it to
# a valid value after we have all our line objects
self.updateScrollOffset()
logger.addEvent('init')
def getScrollOffset(self):
return self.scrollOffset
def getScreenDimensions(self):
return self.stdscr.getmaxyx()
def getChromeBoundaries(self):
(maxy, maxx) = self.stdscr.getmaxyx()
minx = CHROME_MIN_X if self.scrollBar.getIsActivated() else 0
maxy = self.helperChrome.reduceMaxY(maxy)
maxx = self.helperChrome.reduceMaxX(maxx)
# format of (MINX, MINY, MAXX, MAXY)
return (minx, CHROME_MIN_Y, maxx, maxy)
def getViewportHeight(self):
(minx, miny, maxx, maxy) = self.getChromeBoundaries()
return maxy - miny
def setHover(self, index, val):
self.lineMatches[index].setHover(val)
def toggleSelect(self):
self.dirtyHoverIndex()
self.lineMatches[self.hoverIndex].toggleSelect()
def toggleSelectAll(self):
files = set()
for line in self.lineMatches:
if line.getFile() not in files:
files.add(line.getFile())
line.toggleSelect()
self.dirtyLines()
def setSelect(self, val):
self.lineMatches[self.hoverIndex].setSelect(val)
def control(self):
# we start out by printing everything we need to
self.printAll()
self.resetDirty()
self.moveCursor()
while True:
inKey = self.getKey()
self.checkResize()
self.processInput(inKey)
self.processDirty()
self.resetDirty()
self.moveCursor()
self.stdscr.refresh()
def checkResize(self):
(maxy, maxx) = self.getScreenDimensions()
        if (maxy != self.oldmaxy or maxx != self.oldmaxx):
# we resized so print all!
self.printAll()
self.resetDirty()
self.stdscr.refresh()
logger.addEvent('resize')
(self.oldmaxy, self.oldmaxx) = self.getScreenDimensions()
def updateScrollOffset(self):
"""
yay scrolling logic! we will start simple here
and basically just center the viewport to current
matched line
"""
windowHeight = self.getViewportHeight()
halfHeight = int(round(windowHeight / 2.0))
# important, we need to get the real SCREEN position
# of the hover index, not its index within our matches
hovered = self.lineMatches[self.hoverIndex]
desiredTopRow = hovered.getScreenIndex() - halfHeight
oldOffset = self.scrollOffset
desiredTopRow = max(desiredTopRow, 0)
newOffset = -desiredTopRow
# lets add in some leeway -- dont bother repositioning
# if the old offset is within 1/2 of the window height
# of our desired (unless we absolutely have to)
if abs(newOffset -
oldOffset) > halfHeight / 2 or self.hoverIndex + oldOffset < 0:
# need to reassign now we have gone too far
self.scrollOffset = newOffset
        if oldOffset != self.scrollOffset:
self.dirtyLines()
# also update our scroll bar
self.scrollBar.calcBoxFractions()
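        # Worked example (hypothetical numbers): with a 40-row viewport halfHeight is
        # 20; hovering a match at screen index 130 gives desiredTopRow 110, i.e. a
        # candidate offset of -110. If the old offset was -105 (and the hovered row
        # is still on screen) the difference of 5 is within halfHeight / 2, so the
        # viewport is intentionally left where it is.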
def pageDown(self):
pageHeight = (int)(self.getViewportHeight() * 0.5)
self.moveIndex(pageHeight)
def pageUp(self):
pageHeight = (int)(self.getViewportHeight() * 0.5)
self.moveIndex(-pageHeight)
def moveIndex(self, delta):
newIndex = (self.hoverIndex + delta) % self.numMatches
self.jumpToIndex(newIndex)
def jumpToIndex(self, newIndex):
self.setHover(self.hoverIndex, False)
self.dirtyHoverIndex()
self.hoverIndex = newIndex
self.setHover(self.hoverIndex, True)
self.dirtyHoverIndex()
self.updateScrollOffset()
def processInput(self, key):
if key == 'UP' or key == 'k':
self.moveIndex(-1)
elif key == 'DOWN' or key == 'j':
self.moveIndex(1)
elif key == 'c':
self.beginEnterCommand()
elif key == ' ' or key == 'PAGE_DOWN':
self.pageDown()
elif key == 'b' or key == 'PAGE_UP':
self.pageUp()
elif key == 'g':
self.jumpToIndex(0)
elif key == 'G':
self.jumpToIndex(self.numMatches - 1)
elif key == 'f':
self.toggleSelect()
elif key == 'A':
self.toggleSelectAll()
elif key == 'ENTER':
self.onEnter()
elif key == 'q':
output.outputNothing()
# this will get the appropriate selection and save it to a file for reuse
# before exiting the program
self.getFilesToUse()
self.cursesAPI.exit()
pass
def getFilesToUse(self):
# if we have select files, those, otherwise hovered
toUse = self.getSelectedFiles()
if not toUse:
toUse = self.getHoveredFiles()
# save the selection we are using
if self.cursesAPI.allowFileOutput():
output.outputSelection(toUse)
return toUse
def getSelectedFiles(self):
return [lineObj for (index, lineObj) in enumerate(self.lineMatches)
if lineObj.getSelected()]
def getHoveredFiles(self):
return [lineObj for (index, lineObj) in enumerate(self.lineMatches)
if index == self.hoverIndex]
def showAndGetCommand(self):
fileObjs = self.getFilesToUse()
files = [fileObj.getFile() for fileObj in fileObjs]
(maxy, maxx) = self.getScreenDimensions()
halfHeight = int(round(maxy / 2) - len(files) / 2.0)
borderLine = '=' * len(SHORT_COMMAND_PROMPT)
promptLine = '.' * len(SHORT_COMMAND_PROMPT)
# from helper chrome code
maxFileLength = maxx - 5
if self.helperChrome.getIsSidebarMode():
# need to be shorter to not go into side bar
maxFileLength = len(SHORT_COMMAND_PROMPT) + 18
# first lets print all the files
startHeight = halfHeight - 1 - len(files)
try:
self.stdscr.addstr(startHeight - 3, 0, borderLine)
self.stdscr.addstr(startHeight - 2, 0, SHORT_FILES_HEADER)
self.stdscr.addstr(startHeight - 1, 0, borderLine)
for index, file in enumerate(files):
self.stdscr.addstr(startHeight + index, 0,
file[0:maxFileLength])
except curses.error:
pass
# first print prompt
try:
self.stdscr.addstr(halfHeight, 0, SHORT_COMMAND_PROMPT)
self.stdscr.addstr(halfHeight + 1, 0, SHORT_COMMAND_PROMPT2)
except curses.error:
pass
# then line to distinguish and prompt line
try:
self.stdscr.addstr(halfHeight - 1, 0, borderLine)
self.stdscr.addstr(halfHeight + 2, 0, borderLine)
self.stdscr.addstr(halfHeight + 3, 0, promptLine)
except curses.error:
pass
self.stdscr.refresh()
self.cursesAPI.echo()
maxX = int(round(maxx - 1))
command = self.stdscr.getstr(halfHeight + 3, 0, maxX)
return command
def beginEnterCommand(self):
self.stdscr.clear()
# first check if they are trying to enter command mode
# but already have a command...
if len(self.flags.getPresetCommand()):
self.helperChrome.output(self.mode)
(_, minY, _, maxY) = self.getChromeBoundaries()
yStart = (maxY + minY) / 2 - 3
self.printProvidedCommandWarning(yStart)
self.getKey()
self.mode = SELECT_MODE
self.dirtyLines()
return
self.mode = COMMAND_MODE
self.helperChrome.output(self.mode)
logger.addEvent('enter_command_mode')
command = self.showAndGetCommand()
if len(command) == 0:
# go back to selection mode and repaint
self.mode = SELECT_MODE
self.cursesAPI.noecho()
self.dirtyLines()
logger.addEvent('exit_command_mode')
return
lineObjs = self.getFilesToUse()
output.execComposedCommand(command, lineObjs)
sys.exit(0)
def onEnter(self):
lineObjs = self.getFilesToUse()
if not lineObjs:
# nothing selected, assume we want hovered
lineObjs = self.getHoveredFiles()
logger.addEvent('selected_num_files', len(lineObjs))
# commands passed from the command line get used immediately
presetCommand = self.flags.getPresetCommand()
if len(presetCommand) > 0:
output.execComposedCommand(presetCommand, lineObjs)
else:
output.editFiles(lineObjs)
sys.exit(0)
def resetDirty(self):
# reset all dirty state for our components
self.linesDirty = False
self.dirtyIndexes = []
def dirtyHoverIndex(self):
self.dirtyIndexes.append(self.hoverIndex)
def dirtyLines(self):
self.linesDirty = True
def processDirty(self):
if self.linesDirty:
self.printAll()
for index in self.dirtyIndexes:
self.lineMatches[index].output(self.colorPrinter)
if self.helperChrome.getIsSidebarMode():
# need to output since lines can override
# the sidebar stuff
self.printChrome()
def printAll(self):
self.stdscr.clear()
self.printLines()
self.printScroll()
self.printChrome()
def printLines(self):
for lineObj in self.lineObjs.values():
lineObj.output(self.colorPrinter)
def printScroll(self):
self.scrollBar.output()
def printProvidedCommandWarning(self, yStart):
self.colorPrinter.setAttributes(
curses.COLOR_WHITE, curses.COLOR_RED, 0)
self.stdscr.addstr(yStart, 0, 'Oh no! You already provided a command so ' +
'you cannot enter command mode.')
self.stdscr.attrset(0)
self.stdscr.addstr(
yStart + 1, 0, 'The command you provided was "%s" ' % self.flags.getPresetCommand())
self.stdscr.addstr(
yStart + 2, 0, 'Press any key to go back to selecting files.')
def printChrome(self):
self.helperChrome.output(self.mode)
def moveCursor(self):
x = CHROME_MIN_X if self.scrollBar.getIsActivated() else 0
y = self.lineMatches[
self.hoverIndex].getScreenIndex() + self.scrollOffset
self.stdscr.move(y, x)
def getKey(self):
charCode = self.stdscr.getch()
return CODE_TO_CHAR.get(charCode, '')
| {
"content_hash": "2ff0aabf97f93d92a936cebf2e0a4014",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 101,
"avg_line_length": 33.57962962962963,
"alnum_prop": 0.6001764738322396,
"repo_name": "pallavagarwal07/PathPicker",
"id": "53f473989952008d2ff8e1f34294d0764db9e64d",
"size": "18430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/screenControl.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1268"
},
{
"name": "HTML",
"bytes": "6465"
},
{
"name": "Python",
"bytes": "84129"
},
{
"name": "Ruby",
"bytes": "895"
},
{
"name": "Shell",
"bytes": "3038"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
subscription_id: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/featurecapabilities") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class ComponentFeatureCapabilitiesOperations(object):
"""ComponentFeatureCapabilitiesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.applicationinsights.v2015_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ApplicationInsightsComponentFeatureCapabilities":
"""Returns feature capabilities of the application insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationInsightsComponentFeatureCapabilities, or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentFeatureCapabilities
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentFeatureCapabilities"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
request = build_get_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
resource_name=resource_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationInsightsComponentFeatureCapabilities', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/featurecapabilities"} # type: ignore
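    # Usage sketch (assumption, not generated code): with an authenticated
    # ApplicationInsightsManagementClient named `client`, this operation is reached as
    #   caps = client.component_feature_capabilities.get("my-rg", "my-component")
    # where the resource group and component names are placeholders.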
| {
"content_hash": "e0f941fc069490ba99c7fc74c32a851e",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 215,
"avg_line_length": 41.75373134328358,
"alnum_prop": 0.6936550491510277,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6e42b2296ccfba8474f27f19c78aa5c96a7e7d3f",
"size": "6095",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/operations/_component_feature_capabilities_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django.conf import settings
from mail.models import MailTemplate
# slug for the email template
PAYMENT_LATER = 'payment_later'
PAYMENT_THANKYOU = 'payment_thankyou'
def init_app_pay():
"""For the description, check 'mail_template_context' in 'pay.models'."""
MailTemplate.objects.init_mail_template(
PAYMENT_LATER,
'Thank you for your application',
(
"You can add the following variables to the template:\n"
"{{ name }} name of the customer.\n"
"{{ description }} transaction detail.\n"
"{{ total }} total value of the transaction."
),
False,
settings.MAIL_TEMPLATE_TYPE,
subject='Thank you for your application',
description="We will contact you to arrange payment.",
)
MailTemplate.objects.init_mail_template(
PAYMENT_THANKYOU,
'Thank you for your payment',
(
"You can add the following variables to the template:\n"
"{{ name }} name of the customer.\n"
"{{ description }} transaction detail.\n"
"{{ total }} total value of the transaction."
),
False,
settings.MAIL_TEMPLATE_TYPE,
subject='Thank you for your payment',
description="We will send you the course materials.",
)
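# Usage sketch (assumed, not part of this module): init_app_pay() is typically run
# once, e.g. from a data migration or an application start-up hook, so that both
# templates exist before any payment email is queued; emails are then rendered by
# looking the templates up via the PAYMENT_LATER / PAYMENT_THANKYOU slugs.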
| {
"content_hash": "a8859ed4e9c4fe5253bfd84d268e2b35",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 33.275,
"alnum_prop": 0.6040570999248686,
"repo_name": "pkimber/pay",
"id": "30af5f1b0434f23a5f9e1563f8cc279f84894d0a",
"size": "1357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pay/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10029"
},
{
"name": "Python",
"bytes": "69074"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
} |
from django_assets import Bundle, register
js = Bundle(
'js/vendor/jquery.scrollto.js',
'js/home.js',
output='gen/home.%(version)s.js')
css = Bundle(
'css/home.css',
output='gen/home.%(version)s.css')
register('js_home', js)
register('css_home', css)
| {
"content_hash": "8d82fd7b35d6696cb4bedfa0e666e6b2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 42,
"avg_line_length": 19.846153846153847,
"alnum_prop": 0.6821705426356589,
"repo_name": "100Shapes/100shapes.github.com",
"id": "7a26f7e1f87cf9966f4504a12fb5de9e2cc56f88",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/src",
"path": "ohs_site/home/assets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "2204"
},
{
"name": "Python",
"bytes": "12734"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Circuito',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.TextField()),
('pos_latitud', models.FloatField()),
('pos_longitud', models.FloatField()),
('longitud', models.FloatField()),
],
),
migrations.CreateModel(
name='Clasificacion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('posicion', models.IntegerField()),
],
),
migrations.CreateModel(
name='Escuderia',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.TextField()),
('fecha_fundacion', models.DateField()),
('empleados', models.IntegerField()),
],
),
migrations.CreateModel(
name='GranPremio',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('fecha', models.DateField()),
('circuito', models.ForeignKey(to='competicion.Circuito')),
],
),
migrations.CreateModel(
name='Persona',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.TextField()),
('apellidos', models.TextField()),
('fecha_nacimiento', models.DateField()),
],
),
migrations.CreateModel(
name='Piloto',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('numero_victorias', models.IntegerField()),
('numero_podios', models.IntegerField()),
('escuderia', models.ForeignKey(to='competicion.Escuderia')),
('persona', models.ForeignKey(to='competicion.Persona')),
],
),
migrations.AlterUniqueTogether(
name='persona',
unique_together=set([('nombre', 'apellidos', 'fecha_nacimiento')]),
),
migrations.AddField(
model_name='escuderia',
name='duenio',
field=models.ForeignKey(to='competicion.Persona'),
),
migrations.AddField(
model_name='clasificacion',
name='gran_premio',
field=models.ForeignKey(to='competicion.GranPremio'),
),
migrations.AddField(
model_name='clasificacion',
name='piloto',
field=models.ForeignKey(to='competicion.Piloto'),
),
migrations.AlterUniqueTogether(
name='circuito',
unique_together=set([('nombre', 'pos_latitud', 'pos_longitud', 'longitud')]),
),
migrations.AlterUniqueTogether(
name='piloto',
unique_together=set([('persona', 'escuderia', 'numero_victorias', 'numero_podios')]),
),
migrations.AlterUniqueTogether(
name='granpremio',
unique_together=set([('circuito', 'fecha')]),
),
migrations.AlterUniqueTogether(
name='escuderia',
unique_together=set([('nombre', 'fecha_fundacion', 'empleados', 'duenio')]),
),
migrations.AlterUniqueTogether(
name='clasificacion',
unique_together=set([('gran_premio', 'piloto', 'posicion')]),
),
]
| {
"content_hash": "a1b4162b16551f4689deb13c6b19fa94",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 114,
"avg_line_length": 38.25,
"alnum_prop": 0.5276520864756159,
"repo_name": "lendoly/djangoF1",
"id": "94fa913d76e871b7e4d4fa82cef6579a4ea857c3",
"size": "4002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SC/competicion/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2780"
},
{
"name": "Python",
"bytes": "20899"
}
],
"symlink_target": ""
} |
from indico.util.fossilize import IFossil
class IGroupFossil(IFossil):
def getId(self):
""" Group id """
def getName(self):
""" Group name """
def getEmail(self):
""" Group email """
def getProvider(self):
pass
getProvider.produce = lambda x: getattr(x, 'provider', None)
def getIdentifier(self):
pass
getIdentifier.produce = lambda x: 'Group:{}:{}'.format(getattr(x, 'provider', ''), x.id)
class IAvatarMinimalFossil(IFossil):
def getId(self):
""" Avatar id"""
def getIdentifier(self):
pass
getIdentifier.produce = lambda x: 'User:{}'.format(x.id)
def getStraightFullName(self):
""" Avatar full name, the one usually displayed """
getStraightFullName.name = "name"
getStraightFullName.produce = lambda x: x.getStraightFullName(upper=False)
class IAvatarFossil(IAvatarMinimalFossil):
def getEmail(self):
""" Avatar email """
def getFirstName(self):
""" Avatar first name """
def getFamilyName(self):
""" Avatar family name """
def getTitle(self):
""" Avatar name title (Mr, Mrs..) """
def getTelephone(self):
""" Avatar telephone """
getTelephone.name = "phone"
def getOrganisation(self):
""" Avatar organisation / affiliation """
getOrganisation.name = "affiliation"
def getFax(self):
""" Avatar fax """
def getAddress(self):
""" Avatar address """
| {
"content_hash": "c79dcfc8537919878c3b8fda9b1eed24",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 92,
"avg_line_length": 22.953846153846154,
"alnum_prop": 0.6032171581769437,
"repo_name": "mic4ael/indico",
"id": "6473d488829fa5f33014d4e976012ba37154822b",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/legacy/fossils/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""
Tests for L{twisted.internet.posixbase} and supporting code.
"""
from twisted.python.compat import set
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase, _Waker
from twisted.internet.protocol import ServerFactory
skipSockets = None
try:
from twisted.internet import unix
except ImportError:
skipSockets = "Platform does not support AF_UNIX sockets"
from twisted.internet.tcp import Port
from twisted.internet import reactor
from twisted.test.test_unix import ClientProto
class TrivialReactor(PosixReactorBase):
def __init__(self):
self._readers = {}
self._writers = {}
PosixReactorBase.__init__(self)
def addReader(self, reader):
self._readers[reader] = True
def removeReader(self, reader):
del self._readers[reader]
def addWriter(self, writer):
self._writers[writer] = True
def removeWriter(self, writer):
del self._writers[writer]
class PosixReactorBaseTests(TestCase):
"""
Tests for L{PosixReactorBase}.
"""
def _checkWaker(self, reactor):
self.assertIsInstance(reactor.waker, _Waker)
self.assertIn(reactor.waker, reactor._internalReaders)
self.assertIn(reactor.waker, reactor._readers)
def test_wakerIsInternalReader(self):
"""
When L{PosixReactorBase} is instantiated, it creates a waker and adds
it to its internal readers set.
"""
reactor = TrivialReactor()
self._checkWaker(reactor)
def test_removeAllSkipsInternalReaders(self):
"""
Any L{IReadDescriptors} in L{PosixReactorBase._internalReaders} are
left alone by L{PosixReactorBase._removeAll}.
"""
reactor = TrivialReactor()
extra = object()
reactor._internalReaders.add(extra)
reactor.addReader(extra)
reactor._removeAll(reactor._readers, reactor._writers)
self._checkWaker(reactor)
self.assertIn(extra, reactor._internalReaders)
self.assertIn(extra, reactor._readers)
def test_removeAllReturnsRemovedDescriptors(self):
"""
L{PosixReactorBase._removeAll} returns a list of removed
L{IReadDescriptor} and L{IWriteDescriptor} objects.
"""
reactor = TrivialReactor()
reader = object()
writer = object()
reactor.addReader(reader)
reactor.addWriter(writer)
removed = reactor._removeAll(
reactor._readers, reactor._writers)
self.assertEqual(set(removed), set([reader, writer]))
self.assertNotIn(reader, reactor._readers)
self.assertNotIn(writer, reactor._writers)
def test_IReactorArbitraryIsDeprecated(self):
"""
L{twisted.internet.interfaces.IReactorArbitrary} is redundant with
L{twisted.internet.interfaces.IReactorFDSet} and is deprecated.
"""
from twisted.internet import interfaces
interfaces.IReactorArbitrary
warningsShown = self.flushWarnings(
[self.test_IReactorArbitraryIsDeprecated])
self.assertEqual(len(warningsShown), 1)
self.assertEqual(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
"twisted.internet.interfaces.IReactorArbitrary was deprecated "
"in Twisted 10.1.0: See IReactorFDSet.",
warningsShown[0]['message'])
def test_listenWithIsDeprecated(self):
"""
L{PosixReactorBase} implements the deprecated L{IReactorArbitrary}, and
L{PosixReactorBase.listenWith} is a part of that interface. To avoid
unnecessary deprecation warnings when importing posixbase, the
L{twisted.internet.interfaces._IReactorArbitrary} alias that doesn't
have the deprecation warning is imported, and instead
L{PosixReactorBase.listenWith} generates its own deprecation warning.
"""
class fakePort:
def __init__(self, *args, **kw):
pass
def startListening(self):
pass
reactor = TrivialReactor()
reactor.listenWith(fakePort)
warnings = self.flushWarnings([self.test_listenWithIsDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
"listenWith is deprecated since Twisted 10.1. "
"See IReactorFDSet.",
warnings[0]['message'])
def test_connectWithIsDeprecated(self):
"""
L{PosixReactorBase} implements the deprecated L{IReactorArbitrary}, and
L{PosixReactorBase.connectWith} is a part of that interface. To avoid
unnecessary deprecation warnings when importing posixbase, the
L{twisted.internet.interfaces._IReactorArbitrary} alias that doesn't
have the deprecation warning is imported, and instead
L{PosixReactorBase.connectWith} generates its own deprecation warning.
"""
class fakeConnector:
def __init__(self, *args, **kw):
pass
def connect(self):
pass
reactor = TrivialReactor()
reactor.connectWith(fakeConnector)
warnings = self.flushWarnings([self.test_connectWithIsDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
"connectWith is deprecated since Twisted 10.1. "
"See IReactorFDSet.",
warnings[0]['message'])
class TCPPortTests(TestCase):
"""
Tests for L{twisted.internet.tcp.Port}.
"""
if not isinstance(reactor, PosixReactorBase):
skip = "Non-posixbase reactor"
def test_connectionLostFailed(self):
"""
L{Port.stopListening} returns a L{Deferred} which errbacks if
L{Port.connectionLost} raises an exception.
"""
port = Port(12345, ServerFactory())
port.connected = True
port.connectionLost = lambda reason: 1 / 0
return self.assertFailure(port.stopListening(), ZeroDivisionError)
class TimeoutReportReactor(PosixReactorBase):
"""
A reactor which is just barely runnable and which cannot monitor any
readers or writers, and which fires a L{Deferred} with the timeout
passed to its C{doIteration} method as soon as that method is invoked.
"""
def __init__(self):
PosixReactorBase.__init__(self)
self.iterationTimeout = Deferred()
self.now = 100
def addReader(self, reader):
"""
Ignore the reader. This is necessary because the waker will be
added. However, we won't actually monitor it for any events.
"""
def removeAll(self):
"""
There are no readers or writers, so there is nothing to remove.
This will be called when the reactor stops, though, so it must be
implemented.
"""
return []
def seconds(self):
"""
Override the real clock with a deterministic one that can be easily
controlled in a unit test.
"""
return self.now
def doIteration(self, timeout):
d = self.iterationTimeout
if d is not None:
self.iterationTimeout = None
d.callback(timeout)
class IterationTimeoutTests(TestCase):
"""
Tests for the timeout argument L{PosixReactorBase.run} calls
L{PosixReactorBase.doIteration} with in the presence of various delayed
calls.
"""
def _checkIterationTimeout(self, reactor):
timeout = []
reactor.iterationTimeout.addCallback(timeout.append)
reactor.iterationTimeout.addCallback(lambda ignored: reactor.stop())
reactor.run()
return timeout[0]
def test_noCalls(self):
"""
If there are no delayed calls, C{doIteration} is called with a
timeout of C{None}.
"""
reactor = TimeoutReportReactor()
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, None)
def test_delayedCall(self):
"""
If there is a delayed call, C{doIteration} is called with a timeout
which is the difference between the current time and the time at
which that call is to run.
"""
reactor = TimeoutReportReactor()
reactor.callLater(100, lambda: None)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 100)
def test_timePasses(self):
"""
If a delayed call is scheduled and then some time passes, the
timeout passed to C{doIteration} is reduced by the amount of time
which passed.
"""
reactor = TimeoutReportReactor()
reactor.callLater(100, lambda: None)
reactor.now += 25
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 75)
def test_multipleDelayedCalls(self):
"""
If there are several delayed calls, C{doIteration} is called with a
timeout which is the difference between the current time and the
time at which the earlier of the two calls is to run.
"""
reactor = TimeoutReportReactor()
reactor.callLater(50, lambda: None)
reactor.callLater(10, lambda: None)
reactor.callLater(100, lambda: None)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 10)
def test_resetDelayedCall(self):
"""
If a delayed call is reset, the timeout passed to C{doIteration} is
based on the interval between the time when reset is called and the
new delay of the call.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
reactor.now += 25
call.reset(15)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 15)
def test_delayDelayedCall(self):
"""
If a delayed call is re-delayed, the timeout passed to
C{doIteration} is based on the remaining time before the call would
have been made and the additional amount of time passed to the delay
method.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
reactor.now += 10
call.delay(20)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 60)
def test_cancelDelayedCall(self):
"""
If the only delayed call is canceled, C{None} is the timeout passed
to C{doIteration}.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
call.cancel()
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, None)
class ConnectedDatagramPortTestCase(TestCase):
"""
Test connected datagram UNIX sockets.
"""
if skipSockets is not None:
skip = skipSockets
def test_connectionFailedDoesntCallLoseConnection(self):
"""
L{ConnectedDatagramPort} does not call the deprecated C{loseConnection}
in L{ConnectedDatagramPort.connectionFailed}.
"""
def loseConnection():
"""
Dummy C{loseConnection} method. C{loseConnection} is deprecated and
should not get called.
"""
self.fail("loseConnection is deprecated and should not get called.")
port = unix.ConnectedDatagramPort(None, ClientProto())
port.loseConnection = loseConnection
port.connectionFailed("goodbye")
def test_connectionFailedCallsStopListening(self):
"""
L{ConnectedDatagramPort} calls L{ConnectedDatagramPort.stopListening}
instead of the deprecated C{loseConnection} in
L{ConnectedDatagramPort.connectionFailed}.
"""
self.called = False
def stopListening():
"""
Dummy C{stopListening} method.
"""
self.called = True
port = unix.ConnectedDatagramPort(None, ClientProto())
port.stopListening = stopListening
port.connectionFailed("goodbye")
self.assertEqual(self.called, True)
| {
"content_hash": "2aa24e7b3f047880fffb27b85b1cae08",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 80,
"avg_line_length": 32.197916666666664,
"alnum_prop": 0.6431575541895826,
"repo_name": "Varriount/Colliberation",
"id": "1d6c72cd25f84519192bc4152440a96d6fd35fb0",
"size": "12437",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/twisted/internet/test/test_posixbase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "509005"
},
{
"name": "D",
"bytes": "29"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "10503398"
},
{
"name": "Shell",
"bytes": "1512"
}
],
"symlink_target": ""
} |
from .aes import AES
from .rsa import RSA, RSAServerKey
from .auth_key import AuthKey
from .factorization import Factorization
| {
"content_hash": "8cf73c084dd7c0244e66764d248f22ea",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 40,
"avg_line_length": 31.75,
"alnum_prop": 0.8188976377952756,
"repo_name": "kyasabu/Telethon",
"id": "81416cd9d1b7e1109450d37418f9274b3cf4adc7",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/better-updates",
"path": "telethon/crypto/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "237842"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Henry Walshaw'
__date__ = '2015-11-02'
__copyright__ = '(C) 2015 by Henry Walshaw'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os.path
from PyQt4.QtCore import QSettings
from qgis.core import (
QgsVectorFileWriter, QgsFields, QGis, QgsFeature, QgsGeometry, QgsPoint)
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.core.parameters import (
ParameterVector, ParameterNumber, ParameterTableField, ParameterBoolean
)
from processing.core.outputs import OutputVector, OutputNumber
from processing.tools import dataobjects, vector
import numpy as np
from scipy.stats import gamma, uniform
from anon_utils import DifferentialPrivacyUtils
class DifferentialPrivacyAlgorithm(GeoAlgorithm):
"""
Differential Privacy algorithm implementing the method outlined in:
Andrés, M.E. et al., 2013. Geo-indistinguishability. In the 2013 ACM SIGSAC
conference. New York, New York, USA: ACM Press, pp. 901–914.
"""
# Constants used to refer to parameters and outputs. They will be
# used when calling the algorithm from another algorithm, or when
# calling from the QGIS console.
OUTPUT_LAYER = 'OUTPUT_LAYER'
INPUT_LAYER = 'INPUT_LAYER'
PROTECTION_DISTANCE = 'PROTECTION_DISTANCE'
NINETY_FIVE_DISTANCE = 'NINETY_FIVE_DISTANCE'
LIMIT_NINETY_FIVE = 'LIMIT_NINETY_FIVE'
def getIcon(self):
"""Get the icon.
"""
return DifferentialPrivacyUtils.getIcon()
def defineCharacteristics(self):
"""Here we define the inputs and output of the algorithm, along
with some other properties.
"""
# The name that the user will see in the toolbox
self.name = 'Differential Privacy - independent points'
# The branch of the toolbox under which the algorithm will appear
self.group = 'Vector'
# We add the input vector layer. It can have any kind of geometry
# It is a mandatory (not optional) one, hence the False argument
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_POINT], False))
self.addParameter(ParameterNumber(
self.PROTECTION_DISTANCE,
self.tr('Protection distance (projected units)'),
minValue=0.,
default=500
))
self.addParameter(ParameterBoolean(
self.LIMIT_NINETY_FIVE,
"Limit the distance moved to the 95% confidence interval",
default=False
))
# We add a vector layer as output
self.addOutput(OutputVector(self.OUTPUT_LAYER,
self.tr('Anonymized features')))
self.addOutput(OutputNumber(
self.NINETY_FIVE_DISTANCE,
"95% confidence distance for offset"
))
def processAlgorithm(self, progress):
"""Here is where the processing itself takes place."""
# The first thing to do is retrieve the values of the parameters
# entered by the user
inputFilename = self.getParameterValue(self.INPUT_LAYER)
radius = float(self.getParameterValue(
self.PROTECTION_DISTANCE))
base_epsilon = float(ProcessingConfig.getSetting(
DifferentialPrivacyUtils.DIFFERENTIAL_EPSILON))
limit_nine_five = self.getParameterValue(self.LIMIT_NINETY_FIVE)
# scale should be 1 / epsilon where epsilon is some base epsilon constant / chosen radius
r_generator = gamma(2., scale=radius / base_epsilon)
theta_generator = uniform(scale=2 * np.pi)
output = self.getOutputValue(self.OUTPUT_LAYER)
# Input layers vales are always a string with its location.
# That string can be converted into a QGIS object (a
# QgsVectorLayer in this case) using the
# processing.getObjectFromUri() method.
vectorLayer = dataobjects.getObjectFromUri(inputFilename)
# And now we can process
# First we create the output layer. The output value entered by
# the user is a string containing a filename, so we can use it
# directly
settings = QSettings()
systemEncoding = settings.value('/UI/encoding', 'System')
provider = vectorLayer.dataProvider()
writer = QgsVectorFileWriter(output, systemEncoding,
provider.fields(),
provider.geometryType(), provider.crs())
# Now we take the features from input layer and add them to the
# output. Method features() returns an iterator, considering the
# selection that might exist in layer and the configuration that
# indicates should algorithm use only selected features or all
# of them
nine_five_distance = r_generator.ppf(0.95)
features = vector.features(vectorLayer)
for f in features:
r = r_generator.rvs()
if limit_nine_five and r > nine_five_distance:
r = nine_five_distance
theta = theta_generator.rvs()
g = f.geometryAndOwnership()
g.translate(np.cos(theta) * r, np.sin(theta) * r)
f.setGeometry(g)
writer.addFeature(f)
ProcessingLog.addToLog(
            ProcessingLog.LOG_INFO,
            "95% confidence distance: {}".format(nine_five_distance)
)
self.setOutputValue(self.NINETY_FIVE_DISTANCE, nine_five_distance)
def help(self):
"""
Get the help documentation for this algorithm.
        :return: Tuple of (True, help_html), where help_html is loaded from file
:rtype: bool, str
"""
help_data = open(os.path.join(
os.path.dirname(__file__),
"doc",
"independent_points.html"
)).read()
return True, help_data
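# Illustrative sketch (not part of the plugin): how a single coordinate pair is
# displaced by the planar Laplace mechanism used in processAlgorithm above. The
# radius is drawn from a gamma(2) distribution with scale radius / epsilon and the
# direction from a uniform angle; the default parameters below are made-up examples.
def _planar_laplace_offset_example(x, y, radius=500.0, base_epsilon=4.0, rng=None):
    """Return (x, y) displaced with the same sampling scheme as the algorithm."""
    rng = rng or np.random.RandomState(0)
    r = gamma(2., scale=radius / base_epsilon).rvs(random_state=rng)
    theta = uniform(scale=2 * np.pi).rvs(random_state=rng)
    return x + np.cos(theta) * r, y + np.sin(theta) * r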
class DisplacementLines(GeoAlgorithm):
"""
Algorithm for visualising the movement of points displaced.
Takes two input point layers with ID columns which should match, and builds
lines between points with matching IDs.
"""
# Constants used to refer to parameters and outputs. They will be
# used when calling the algorithm from another algorithm, or when
# calling from the QGIS console.
OUTPUT_LAYER = 'OUTPUT_LAYER'
BASE_LAYER = 'BASE_LAYER'
DISPLACED_LAYER = 'DISPLACED_LAYER'
BASE_ID_FIELD = 'BASE_ID_FIELD'
DISPLACED_ID_FIELD = 'DISPLACED_ID_FIELD'
def getIcon(self):
"""Get the icon.
"""
return DifferentialPrivacyUtils.getIcon()
def defineCharacteristics(self):
"""Here we define the inputs and output of the algorithm, along
with some other properties.
"""
# The name that the user will see in the toolbox
self.name = 'Displacement Lines'
# The branch of the toolbox under which the algorithm will appear
self.group = 'Vector'
# We add the input vector layer. It can have any kind of geometry
# It is a mandatory (not optional) one, hence the False argument
self.addParameter(ParameterVector(
self.BASE_LAYER,
self.tr('Base layer'),
[ParameterVector.VECTOR_TYPE_POINT],
False
))
self.addParameter(ParameterTableField(
self.BASE_ID_FIELD,
self.tr('Base layer ID field'),
self.BASE_LAYER
))
self.addParameter(ParameterVector(
self.DISPLACED_LAYER,
self.tr('Displaced layer'),
[ParameterVector.VECTOR_TYPE_POINT],
False
))
self.addParameter(ParameterTableField(
self.DISPLACED_ID_FIELD,
self.tr('Displaced layer ID field'),
self.DISPLACED_LAYER
))
# We add a vector layer as output
self.addOutput(OutputVector(self.OUTPUT_LAYER,
self.tr('Displacement lines')))
def processAlgorithm(self, progress):
"""Here is where the processing itself takes place."""
# The first thing to do is retrieve the values of the parameters
# entered by the user
base_filename = self.getParameterValue(self.BASE_LAYER)
displaced_filename = self.getParameterValue(self.DISPLACED_LAYER)
output = self.getOutputValue(self.OUTPUT_LAYER)
# Input layers vales are always a string with its location.
# That string can be converted into a QGIS object (a
# QgsVectorLayer in this case) using the
# processing.getObjectFromUri() method.
base_layer = dataobjects.getObjectFromUri(base_filename)
displayed_layer = dataobjects.getObjectFromUri(displaced_filename)
# And now we can process
# First, get the ID field index for each layer
base_id_idx = base_layer.fieldNameIndex(self.getParameterValue(
self.BASE_ID_FIELD
))
displaced_id_idx = displayed_layer.fieldNameIndex(self.getParameterValue(
self.DISPLACED_ID_FIELD
))
# Grab the ID field and drop it in a fields object for the output.
fields = QgsFields()
fields.append(
displayed_layer.fields()[displaced_id_idx]
)
# Displaced points
features = vector.features(displayed_layer)
displaced_points = {
f[displaced_id_idx]: f.geometry().asPoint()
for f in features
}
# Base points
features = vector.features(base_layer)
base_points = {
f[base_id_idx]: f.geometry().asPoint()
for f in features
}
# Build the output layer
settings = QSettings()
systemEncoding = settings.value('/UI/encoding', 'System')
provider = displayed_layer.dataProvider()
writer = QgsVectorFileWriter(
output, systemEncoding, fields, QGis.WKBLineString, provider.crs())
# Loop over the displayed points and build the line that links them to
# the base points
for id, endpoint in displaced_points.iteritems():
try:
startpoint = base_points[id]
except KeyError:
ProcessingLog.addToLog(
ProcessingLog.LOG_WARNING,
"Couldn't find input feature with ID {}".format(
id
)
)
else:
feature = QgsFeature()
feature.setGeometry(QgsGeometry.fromPolyline(
[startpoint, endpoint]))
feature.setAttributes([id, ])
writer.addFeature(feature)
# There is nothing more to do here. We do not have to open the
# layer that we have created. The framework will take care of
# that, or will handle it if this algorithm is executed within
# a complex model
def help(self):
"""
Get the help documentation for this algorithm.
        :return: Tuple of (True, help_html), where help_html is loaded from file
:rtype: bool, str
"""
help_data = open(os.path.join(
os.path.dirname(__file__),
"doc",
"displacement_lines.html"
)).read()
return True, help_data
class GridBasedMasking(GeoAlgorithm):
"""
Grid based masking algorithm as described in:
Seidl, D.E., Jankowski, P. & Tsou, M.-H., 2015. Privacy and spatial pattern
preservation in masked GPS trajectory data. International Journal of
Geographical Information Science, 30(4), pp.785–800.
"""
OUTPUT_LAYER = 'OUTPUT_LAYER'
INPUT_LAYER = 'INPUT_LAYER'
GRID_SIZE = 'GRID_SIZE'
X_OFFSET = 'X_OFFSET'
Y_OFFSET = 'Y_OFFSET'
def getIcon(self):
"""Get the icon.
"""
return DifferentialPrivacyUtils.getIcon()
def defineCharacteristics(self):
"""Here we define the inputs and output of the algorithm, along
with some other properties.
"""
self.name = 'Grid based masking'
# The branch of the toolbox under which the algorithm will appear
self.group = 'Vector'
# We add the input vector layer. It can have any kind of geometry
# It is a mandatory (not optional) one, hence the False argument
self.addParameter(ParameterVector(
self.INPUT_LAYER,
self.tr('Input layer'),
[ParameterVector.VECTOR_TYPE_POINT],
False
))
self.addParameter(ParameterNumber(
self.GRID_SIZE,
self.tr('Grid size'),
minValue=0.,
default=500
))
# We add a vector layer as output
self.addOutput(OutputVector(self.OUTPUT_LAYER,
self.tr('Anonymized features')))
self.addParameter(ParameterNumber(
self.X_OFFSET,
"X grid offset",
minValue=0.0
))
self.addParameter(ParameterNumber(
self.Y_OFFSET,
"Y grid offset",
minValue=0.0
))
def round_to_grid(self, point, cell_size, x_offset, y_offset):
"""
        Round the coordinates of a point to the nearest point of a grid.
        :param point: The point to migrate.
:type point: QgsPoint
:param cell_size: Size of the grid to round towards
:type cell_size: float
:return: The migrated point
:rtype: QgsPoint
"""
xy = np.array([point.x(), point.y()]) - np.array([x_offset, y_offset])
new_xy = np.round(xy / cell_size) * cell_size + np.array([x_offset, y_offset])
return QgsPoint(*new_xy)
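        # Worked example (hypothetical values): with cell_size=500 and zero offsets,
        # a point at (1234.0, 678.0) snaps to (1000.0, 500.0); with x_offset=250 the
        # same point snaps to (1250.0, 500.0) because the grid origin shifts.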
def processAlgorithm(self, progress):
"""Here is where the processing itself takes place."""
# The first thing to do is retrieve the values of the parameters
# entered by the user
inputFilename = self.getParameterValue(self.INPUT_LAYER)
grid_size = float(self.getParameterValue(self.GRID_SIZE))
x_offset = float(self.getParameterValue(self.X_OFFSET))
y_offset = float(self.getParameterValue(self.Y_OFFSET))
output = self.getOutputValue(self.OUTPUT_LAYER)
# Input layers vales are always a string with its location.
# That string can be converted into a QGIS object (a
# QgsVectorLayer in this case) using the
# processing.getObjectFromUri() method.
vectorLayer = dataobjects.getObjectFromUri(inputFilename)
# And now we can process
# First we create the output layer. The output value entered by
# the user is a string containing a filename, so we can use it
# directly
settings = QSettings()
systemEncoding = settings.value('/UI/encoding', 'System')
provider = vectorLayer.dataProvider()
writer = QgsVectorFileWriter(output, systemEncoding,
provider.fields(),
provider.geometryType(), provider.crs())
# Now we take the features from input layer and add them to the
# output. Method features() returns an iterator, considering the
# selection that might exist in layer and the configuration that
# indicates should algorithm use only selected features or all
# of them
features = vector.features(vectorLayer)
for f in features:
g = f.geometryAndOwnership()
new_point = self.round_to_grid(
g.asPoint(), grid_size, x_offset, y_offset)
f.setGeometry(QgsGeometry.fromPoint(new_point))
writer.addFeature(f)
def help(self):
"""
Get the help documentation for this algorithm.
        :return: Tuple of (True, help_html), where help_html is loaded from file
:rtype: bool, str
"""
help_data = open(os.path.join(
os.path.dirname(__file__),
"doc",
"grid_based_masking.html"
)).read()
return True, help_data
| {
"content_hash": "f36c141eba8c1cd55c1f33a6de314752",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 97,
"avg_line_length": 34.33808553971487,
"alnum_prop": 0.6030842230130486,
"repo_name": "SpatialVision/differential_privacy",
"id": "36b6c30687679c26ca53dc8382e09bc01fb801a9",
"size": "17356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anon_methods_algorithm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26643"
}
],
"symlink_target": ""
} |
from google.cloud import talent_v4beta1
async def sample_get_company():
# Create a client
client = talent_v4beta1.CompanyServiceAsyncClient()
# Initialize request argument(s)
request = talent_v4beta1.GetCompanyRequest(
name="name_value",
)
# Make the request
response = await client.get_company(request=request)
# Handle the response
print(response)
# [END jobs_v4beta1_generated_CompanyService_GetCompany_async]
| {
"content_hash": "7f5eed45709529c4a3481dfa141c76db",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 24.36842105263158,
"alnum_prop": 0.712742980561555,
"repo_name": "googleapis/python-talent",
"id": "f3c4a42a3ec7c070fb031a71d930aed21ca0545f",
"size": "1845",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/jobs_v4beta1_generated_company_service_get_company_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2538179"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
"""
Kernel Density Estimator
"""
from __future__ import print_function
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class KDE(object):
"""
use Epanechnikov kernel
"""
def fit(self, data):
"""
:param data: data to fit, numpy 2-D array
"""
# TODO: Figure out what the bandwidth should be using
# Gaussian Bandwidth Selector
pass
def log_density(self, data):
"""
:param data: data to predict density for, numpy 2-D array
:return: numpy 1-D array, with natural log density of each point
provided.
"""
# TODO: Evaluate and return log-densities
pass
def parse_args(*argument_array):
parser = argparse.ArgumentParser()
parser.add_argument('--data', default='readings.csv')
args = parser.parse_args(*argument_array)
return args
def main(args):
df = pd.read_csv(args.data)
X = np.array(df[['reading']])
plt.hist(X, bins=20)
kde = KDE()
kde.fit(X)
# TODO: create a grid for plotting
# TODO: calculate density of a grid
# TODO: scale density so it matches the histogram
# TODO: plot scaled density
plt.tight_layout()
plt.show()
if __name__ == '__main__':
args = parse_args()
main(args)
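# A minimal sketch (not wired into the exercise above) of what the two TODOs could
# look like: a rule-of-thumb (Silverman-style) bandwidth and an Epanechnikov kernel
# for the density. It flattens the (n, 1) arrays that main() passes in.
class EpanechnikovKDEExample(object):
    def fit(self, data):
        self.data = np.asarray(data, dtype=float).ravel()
        # rule-of-thumb bandwidth: 1.06 * std * n^(-1/5)
        self.bandwidth = 1.06 * self.data.std() * len(self.data) ** (-0.2)
    def log_density(self, points):
        points = np.asarray(points, dtype=float).ravel()
        # scaled distances between every query point and every sample, shape (m, n)
        u = (points[:, None] - self.data[None, :]) / self.bandwidth
        kernel = 0.75 * np.maximum(1.0 - u ** 2, 0.0)   # Epanechnikov kernel
        density = kernel.mean(axis=1) / self.bandwidth  # mean of kernels over h
        return np.log(density + 1e-300)                 # guard against log(0)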
| {
"content_hash": "25dd7dd855e2ecba986456206d70e931",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 72,
"avg_line_length": 23.140350877192983,
"alnum_prop": 0.6110689916603488,
"repo_name": "mamikonyana/mamikonyana.github.io",
"id": "725e645fe4c3d0add468b568b4e077985b1d871c",
"size": "1342",
"binary": false,
"copies": "1",
"ref": "refs/heads/flask",
"path": "static/ml_afternoon/presentation_data/practical_s2/kde.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "102"
},
{
"name": "HTML",
"bytes": "11586263"
},
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "Python",
"bytes": "95088"
},
{
"name": "Shell",
"bytes": "1662"
},
{
"name": "Stan",
"bytes": "872"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import logging
import numpy as np
logging.basicConfig(level=logging.INFO)
from robo.solver.hyperband_datasets_size import HyperBand_DataSubsets
from hpolib.benchmarks.ml.surrogate_svm import SurrogateSVM
run_id = int(sys.argv[1])
seed = int(sys.argv[2])
rng = np.random.RandomState(seed)
dataset = "surrogate"
f = SurrogateSVM(path="/mhome/kleinaa/experiments/fabolas/dataset/svm_on_mnist_grid", rng=rng)
output_path = "/mhome/kleinaa/experiments/fabolas_journal/results/svm_%s/hyperband_last_seen_incumbent_%d" % (dataset, run_id)
os.makedirs(output_path, exist_ok=True)
eta = 3.
B = -int(np.log(f.s_min)/np.log(3))
print(B)
opt = HyperBand_DataSubsets(f, eta, eta**(-(B-1)), output_path=output_path, rng=rng)
opt.run(int(20 / B * 1.5))
test_error = []
runtime = []
cum_cost = 0
for i, c in enumerate(opt.incumbents):
test_error.append(f.objective_function_test(c)["function_value"])
results = dict()
results["test_error"] = test_error
cum_cost += opt.time_func_eval_incumbent[i]
runtime.append(opt.runtime[i] + cum_cost)
results["runtime"] = runtime
results["run_id"] = run_id
with open(os.path.join(output_path, 'results_%d.json' % run_id), 'w') as fh:
json.dump(results, fh)
| {
"content_hash": "f19e7cc63647e4e94f72ddad37f8c573",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 126,
"avg_line_length": 25.38,
"alnum_prop": 0.6989755713159969,
"repo_name": "numairmansur/RoBO",
"id": "ca036d2b911912fd1b27e2e4cdc9e170f4359877",
"size": "1269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/bayesopt/run_hyperband_surrogate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1008348"
},
{
"name": "Python",
"bytes": "340324"
},
{
"name": "Shell",
"bytes": "1006"
}
],
"symlink_target": ""
} |
from distutils.core import setup
import sys
reload(sys).setdefaultencoding("UTF-8")
version = '0.2.2'
setup(
name ='django-fabdeploy-extensions',
version = version,
author = 'Future Colors',
author_email = '[email protected]',
packages = ['fabdeploy_extensions', 'fabdeploy_extensions.extensions'],
url = 'https://github.com/futurecolors/django-fabdeploy-extensions',
download_url = 'https://github.com/futurecolors/django-fabdeploy-extensions/zipball/master',
license = 'MIT license',
description = u'Fabdeploy extension to use UWSGI'.encode('utf8'),
long_description = open('README').read().decode('utf8'),
requires = ['fab_deploy (>=0.7.1)', 'Fabric (>=1.0.0)', 'jinja2'],
classifiers=(
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
),
)
| {
"content_hash": "69303ba69aa71ba051bc2aeccfec0f92",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 96,
"avg_line_length": 35.86666666666667,
"alnum_prop": 0.6477695167286245,
"repo_name": "futurecolors/django-fabdeploy-extensions",
"id": "8833e3c44b484678b82261570be1037872ea2a24",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18736"
},
{
"name": "Shell",
"bytes": "11039"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from djangosaml2 import views
urlpatterns = [
url(r'^login/$', views.login, name='saml2_login'),
url(r'^acs/$', views.assertion_consumer_service, name='saml2_acs'),
url(r'^logout/$', views.logout, name='saml2_logout'),
url(r'^ls/$', views.logout_service, name='saml2_ls'),
url(r'^ls/post/$', views.logout_service_post, name='saml2_ls_post'),
url(r'^metadata/$', views.metadata, name='saml2_metadata'),
]
| {
"content_hash": "3355c71e1387347f690528d69ece3be9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 34.07142857142857,
"alnum_prop": 0.6624737945492662,
"repo_name": "GradConnection/djangosaml2",
"id": "95f48004b26ccf4a833201082c18c00559a9b860",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "djangosaml2/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3510"
},
{
"name": "Python",
"bytes": "145646"
}
],
"symlink_target": ""
} |
import os
def upload_path(instance, filename):
_, ext = os.path.splitext(filename)
return "avatars/{pk}{ext}".format(pk=instance.pk, ext=ext)
| {
"content_hash": "9d7b81d9754226dd787115a4659bc59d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 62,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6842105263157895,
"repo_name": "Uran198/med",
"id": "a8dc917dddd4c8b54d79ffb0eb9141fef5d74c2f",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "med/users/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8906"
},
{
"name": "HTML",
"bytes": "43613"
},
{
"name": "JavaScript",
"bytes": "8672"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "110287"
},
{
"name": "Shell",
"bytes": "4535"
}
],
"symlink_target": ""
} |
"""Converts COCO data to TFRecord file format.
This data is used for the out-of-domain supervised pretraining (Sec. 4.3.1).
It is also used to train the MLE baseline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from absl import app
from absl import flags
from language.capwap.utils import image_utils
from language.capwap.utils import io_utils
from language.capwap.utils import text_utils
import tensorflow.compat.v1 as tf
DATA_DIR = os.getenv("CAPWAP_DATA", "data")
COCO_DIR = os.path.join(DATA_DIR, "COCO")
flags.DEFINE_string("splits", os.path.join(COCO_DIR, "karpathy_splits.json"),
"Path to JSON file with pre-split image ids.")
flags.DEFINE_string("coco_path", COCO_DIR, "Path to COCO data.")
flags.DEFINE_string("output_dir", os.path.join(COCO_DIR, "processed/captions"),
"Output data directory.")
flags.DEFINE_integer("train_shards", 256,
"Number of shards in training TFRecord files.")
flags.DEFINE_integer("val_shards", 4,
"Number of shards in validation TFRecord files.")
flags.DEFINE_integer("test_shards", 8,
"Number of shards in testing TFRecord files.")
flags.DEFINE_string("vocab_path", os.path.join(DATA_DIR, "uncased_vocab.txt"),
"Path to BERT directory.")
FLAGS = flags.FLAGS
def load_captions(captions_file, vocab):
"""Loads image ids and processes the captions.
Args:
captions_file: JSON file containing caption annotations.
vocab: A text_utils.Vocab instance.
Returns:
captions: Dictionary of image_id --> captions.
"""
tf.logging.info("Loading captions from %s", captions_file)
with tf.io.gfile.GFile(captions_file, "r") as f:
caption_data = json.load(f)
image_to_captions = collections.defaultdict(list)
for annotation in caption_data["annotations"]:
image_id = annotation["image_id"]
caption_tokens = vocab.tokenize(annotation["caption"])
image_to_captions[image_id].append(" ".join(caption_tokens))
return image_to_captions
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
tf.logging.set_verbosity(tf.logging.INFO)
tf.io.gfile.makedirs(FLAGS.output_dir)
# Load data and re-split according to Karpathy paper.
splits = io_utils.load_karpathy_splits(FLAGS.splits)
vocab = text_utils.Vocab.load(FLAGS.vocab_path)
captions = collections.defaultdict(list)
for split in ["train", "val"]:
captions_file = ("%s/annotations/captions_%s2014.json" %
(FLAGS.coco_path, split))
for image_id, split_captions in load_captions(captions_file, vocab).items():
captions[image_id].extend(split_captions)
for split, image_ids in splits.items():
# Convert to ImageMetadata.
images = []
for image_id, image_captions in captions.items():
if image_id not in image_ids:
continue
metadata = image_utils.ImageMetadata(
image_id=image_id,
captions=image_captions,
objects="%s/%s_features.hdf5" % (FLAGS.coco_path, split))
images.append(metadata)
# Dump to sharded TFRecords.
io_utils.convert_to_tfrecords(
dataset=images,
num_shards=getattr(FLAGS, "%s_shards" % split),
basename=os.path.join(FLAGS.output_dir, split),
example_fn=io_utils.caption_example)
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| {
"content_hash": "ef7c032a453aa6a693ec6a6d817a5da0",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 32.7037037037037,
"alnum_prop": 0.6778029445073612,
"repo_name": "google-research/language",
"id": "8360c8f8e08db7fa1baaf8f000d460dcb809827b",
"size": "4147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/capwap/preprocessing/coco_ood_captions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
'''
Exposes all public package contents.
@author: Eitan Isaacson
@organization: Mozilla Foundation
@copyright: Copyright (c) 2006, 2007 Mozilla Foundation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
from base_plugin import Plugin, ViewportPlugin, ConsolePlugin
from plugin_manager import PluginManager
from view import PluginView
| {
"content_hash": "9295abe28e982569d6ef831ae25b1f6b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 31.294117647058822,
"alnum_prop": 0.8045112781954887,
"repo_name": "javihernandez/accerciser-mirror",
"id": "51ae04c38edaf17c3b7eb70d04ed9d98de6dc3af",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/accerciser/plugin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "384881"
},
{
"name": "Shell",
"bytes": "363"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons import OSCheck
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
# a map of the Ambari role to the component name
# for use with /usr/hdp/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
'HIVE_METASTORE' : 'hive-metastore',
'HIVE_SERVER' : 'hive-server2',
'WEBHCAT_SERVER' : 'hive-webhcat',
'HIVE_CLIENT' : 'hive-client',
'HCAT' : 'hive-client'
}
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_CLIENT")
config = Script.get_config()
if OSCheck.is_windows_family():
hive_metastore_win_service_name = "metastore"
hive_client_win_service_name = "hwi"
hive_server_win_service_name = "hiveserver2"
webhcat_server_win_service_name = "templeton"
else:
hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
hive_pid = 'hive-server.pid'
hive_metastore_pid = 'hive.pid'
hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
process_name = 'mysqld'
if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
daemon_name = 'mysql'
else:
if OSCheck.is_redhat_family() and OSCheck.get_os_major_version() == "7":
daemon_name = 'mariadb'
else:
daemon_name = 'mysqld'
# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
tmp_dir = Script.get_tmp_dir()
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hive_user = config['configurations']['hive-env']['hive_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
# default configuration directories
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
webhcat_conf_dir = '/etc/hive-webhcat/conf'
hive_etc_dir_prefix = "/etc/hive"
hive_conf_dir = "/etc/hive/conf"
hive_client_conf_dir = "/etc/hive/conf"
# !!! required by ranger to be at this location unless HDP 2.3+
hive_server_conf_dir = "/etc/hive/conf.server"
hive_config_dir = hive_client_conf_dir
if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
hive_config_dir = hive_server_conf_dir
| {
"content_hash": "89b2937fd76cd900b109d0ce1f16f130",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 106,
"avg_line_length": 39.23863636363637,
"alnum_prop": 0.735881841876629,
"repo_name": "arenadata/ambari",
"id": "ada0500bbb97e5b7bee352be8e710bb6bf84fc48",
"size": "3475",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/ADH/1.0/services/HIVE/package/scripts/status_params.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a HideYoCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a HideYoCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| {
"content_hash": "ee4708d84322ee18b90dd69626956ac1",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 24.203703703703702,
"alnum_prop": 0.6619484825299669,
"repo_name": "kraxis/hideyocoin",
"id": "583f6b3603cf065820ab0c3bf20846a07929d385",
"size": "7842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "92503"
},
{
"name": "C++",
"bytes": "2555116"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "29436"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69724"
},
{
"name": "Shell",
"bytes": "13173"
},
{
"name": "TypeScript",
"bytes": "5244753"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from cproton import pn_incref, pn_decref, \
pn_py2void, pn_void2py, \
pn_record_get, pn_record_def, pn_record_set, \
PN_PYREF
from ._exceptions import ProtonException
class EmptyAttrs:
def __contains__(self, name):
return False
def __getitem__(self, name):
raise KeyError(name)
def __setitem__(self, name, value):
raise TypeError("does not support item assignment")
EMPTY_ATTRS = EmptyAttrs()
class Wrapper(object):
""" Wrapper for python objects that need to be stored in event contexts and be retrived again from them
Quick note on how this works:
The actual *python* object has only 3 attributes which redirect into the wrapped C objects:
_impl The wrapped C object itself
_attrs This is a special pn_record_t holding a PYCTX which is a python dict
every attribute in the python object is actually looked up here
_record This is the C record itself (so actually identical to _attrs really but
a different python type
Because the objects actual attributes are stored away they must be initialised *after* the wrapping
is set up. This is the purpose of the _init method in the wrapped object. Wrapper.__init__ will call
eht subclass _init to initialise attributes. So they *must not* be initialised in the subclass __init__
before calling the superclass (Wrapper) __init__ or they will not be accessible from the wrapper at all.
"""
def __init__(self, impl_or_constructor, get_context=None):
init = False
if callable(impl_or_constructor):
# we are constructing a new object
impl = impl_or_constructor()
if impl is None:
self.__dict__["_impl"] = impl
self.__dict__["_attrs"] = EMPTY_ATTRS
self.__dict__["_record"] = None
raise ProtonException(
"Wrapper failed to create wrapped object. Check for file descriptor or memory exhaustion.")
init = True
else:
# we are wrapping an existing object
impl = impl_or_constructor
pn_incref(impl)
if get_context:
record = get_context(impl)
attrs = pn_void2py(pn_record_get(record, PYCTX))
if attrs is None:
attrs = {}
pn_record_def(record, PYCTX, PN_PYREF)
pn_record_set(record, PYCTX, pn_py2void(attrs))
init = True
else:
attrs = EMPTY_ATTRS
init = False
record = None
self.__dict__["_impl"] = impl
self.__dict__["_attrs"] = attrs
self.__dict__["_record"] = record
if init:
self._init()
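    # Illustrative sketch of the subclassing contract described in the class
    # docstring (pn_example_attachments is a hypothetical get_context function,
    # not a real proton binding):
    #
    #   class Example(Wrapper):
    #       def __init__(self, impl):
    #           Wrapper.__init__(self, impl, pn_example_attachments)
    #       def _init(self):
    #           self.handler = None   # stored in the PYCTX dict, not on the object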
def __getattr__(self, name):
attrs = self.__dict__["_attrs"]
if name in attrs:
return attrs[name]
else:
raise AttributeError(name + " not in _attrs")
def __setattr__(self, name, value):
if hasattr(self.__class__, name):
object.__setattr__(self, name, value)
else:
attrs = self.__dict__["_attrs"]
attrs[name] = value
def __delattr__(self, name):
attrs = self.__dict__["_attrs"]
if attrs:
del attrs[name]
def __hash__(self):
return hash(addressof(self._impl))
def __eq__(self, other):
if isinstance(other, Wrapper):
return addressof(self._impl) == addressof(other._impl)
return False
def __ne__(self, other):
if isinstance(other, Wrapper):
return addressof(self._impl) != addressof(other._impl)
return True
def __del__(self):
pn_decref(self._impl)
def __repr__(self):
return '<%s.%s 0x%x ~ 0x%x>' % (self.__class__.__module__,
self.__class__.__name__,
id(self), addressof(self._impl))
PYCTX = int(pn_py2void(Wrapper))
addressof = int
| {
"content_hash": "408f7b1acbd32d394574fbc23de56852",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 112,
"avg_line_length": 34.141666666666666,
"alnum_prop": 0.5626067854527703,
"repo_name": "ssorj/qpid-proton",
"id": "4703b348f7f4d7e13f6ff06f9112938d5b301046",
"size": "4887",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/proton/_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1612283"
},
{
"name": "C++",
"bytes": "1432387"
},
{
"name": "CMake",
"bytes": "123649"
},
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Go",
"bytes": "322531"
},
{
"name": "Objective-C",
"bytes": "6108"
},
{
"name": "Python",
"bytes": "829697"
},
{
"name": "Ruby",
"bytes": "365171"
},
{
"name": "SWIG",
"bytes": "45104"
},
{
"name": "Shell",
"bytes": "23043"
}
],
"symlink_target": ""
} |
"""
Given a list containing N integers, calculate the XOR_SUM of all the non-empty subsets of the list and print the value
of sum % (10**9 + 7).
XOR operation on a list (or a subset of the list) is defined as the XOR of all the elements present in it.
E.g. XOR of list containing elements {A,B,C} = ((A^B)^C), where ^ represents XOR.
E.g. XOR_SUM of list A having three elements {X1, X2, X3} can be given as follows.
All non-empty subsets will be {X1, X2, X3, (X1,X2), (X2,X3), (X1,X3), (X1,X2,X3)}
XOR_SUM(A) = X1 + X2 + X3 + X1^X2 + X2^X3 + X1^X3 + ((X1^X2)^X3)
Input Format
An integer T, denoting the number of testcases. 2T lines follow.
Each testcase contains two lines, first line will contains an integer N followed by second line containing N integers
separated by a single space.
"""
__author__ = 'Danyang'
class Solution(object):
def solve(self, cipher):
"""
Algorithm:
        From the perspective of bits
http://math.stackexchange.com/questions/712487/finding-xor-of-all-subsets
:param cipher: the cipher
"""
length, lst = cipher
return reduce(lambda x, y: x | y, lst) * 2 ** (length - 1) % (10 ** 9 + 7)
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
length = int(f.readline().strip())
lst = map(lambda x: int(x), f.readline().strip().split(" "))
cipher = [length, lst]
# solve
s = "%s\n" % (Solution().solve(cipher))
print s,
| {
"content_hash": "055bfcf3ce32f71d5e9fa223d92d9f2d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 118,
"avg_line_length": 33.291666666666664,
"alnum_prop": 0.6132665832290363,
"repo_name": "algorhythms/HackerRankAlgorithms",
"id": "a87addfcc5ffc688d126feecbe7d18856553642b",
"size": "1598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Xoring Ninja.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "5450"
},
{
"name": "Java",
"bytes": "2566"
},
{
"name": "Python",
"bytes": "208997"
}
],
"symlink_target": ""
} |
import unittest
import mock
from algoliasearch.exceptions import RequestException, MissingObjectIdException
from algoliasearch.http.request_options import RequestOptions
from algoliasearch.responses import Response
from algoliasearch.search_index import SearchIndex
from algoliasearch.configs import SearchConfig
from algoliasearch.http.transporter import Transporter
from algoliasearch.http.requester import Requester
from tests.helpers.factory import Factory as F
class TestSearchIndex(unittest.TestCase):
def setUp(self):
self.config = SearchConfig("foo", "bar")
requester = Requester()
self.transporter = Transporter(requester, self.config)
self.transporter.read = mock.Mock(name="read")
self.transporter.read.return_value = {}
self.transporter.write = mock.Mock(name="write")
self.transporter.write.return_value = {}
self.index = SearchIndex(self.transporter, self.config, "index-name")
def test_app_id_getter(self):
self.assertEqual(self.index.app_id, "foo")
def test_name_getter(self):
self.assertEqual(self.index.name, "index-name")
def test_exists(self):
with mock.patch.object(self.index, "get_settings") as submethod_mock:
submethod_mock.side_effect = RequestException("Index does not exist", 404)
indexExists = self.index.exists()
self.index.get_settings.assert_called_once()
self.assertEqual(indexExists, False)
# No request options
args = self.index.get_settings.call_args[0]
self.assertEqual(args[0], None)
self.assertEqual(indexExists, False)
with mock.patch.object(self.index, "get_settings") as submethod_mock:
submethod_mock.side_effect = RequestException("Permissions error", 400)
with self.assertRaises(RequestException) as _:
self.index.exists()
with mock.patch.object(self.index, "get_settings") as submethod_mock:
submethod_mock.return_value = {"hitsPerPage": 20, "maxValuesPerFacet": 100}
request_options = {"X-Algolia-User-ID": "Foo"}
indexExists = self.index.exists(request_options)
# With request options
args = self.index.get_settings.call_args[0]
self.assertEqual(args[0], request_options)
self.index.get_settings.assert_called_once()
self.assertEqual(indexExists, True)
with mock.patch.object(self.index, "get_settings") as submethod_mock:
submethod_mock.return_value = {"hitsPerPage": 20, "maxValuesPerFacet": 100}
indexExists = self.index.exists()
self.index.get_settings.assert_called_once()
self.assertEqual(indexExists, True)
def test_save_objects(self):
# Saving an object without object id
with self.assertRaises(MissingObjectIdException) as _:
self.index.save_objects([{"foo": "bar"}])
with self.assertRaises(MissingObjectIdException) as _:
self.index.save_objects(
[{"foo": "bar"}], {"autoGenerateObjectIDIfNotExist": False}
)
self.index.save_objects(
[{"foo": "bar"}], {"autoGenerateObjectIDIfNotExist": True}
)
self.transporter.write.assert_called_once_with(
"POST",
"1/indexes/index-name/batch",
{"requests": [{"action": "addObject", "body": {"foo": "bar"}}]},
{},
)
self.transporter.write = mock.Mock(name="write")
self.index.save_objects([{"foo": "bar", "objectID": "foo"}])
self.transporter.write.assert_called_once_with(
"POST",
"1/indexes/index-name/batch",
{
"requests": [
{
"action": "updateObject",
"body": {"foo": "bar", "objectID": "foo"},
}
]
},
None,
)
def test_partial_update_objects(self):
# Saving an object without object id
with self.assertRaises(MissingObjectIdException) as _:
self.index.partial_update_objects([{"foo": "bar"}])
with self.assertRaises(MissingObjectIdException) as _:
self.index.partial_update_objects(
[{"foo": "bar"}], {"createIfNotExists": False}
)
self.index.partial_update_objects([{"foo": "bar"}], {"createIfNotExists": True})
self.transporter.write.assert_called_once_with(
"POST",
"1/indexes/index-name/batch",
{"requests": [{"action": "partialUpdateObject", "body": {"foo": "bar"}}]},
{},
)
self.transporter.write = mock.Mock(name="write")
self.index.partial_update_objects([{"foo": "bar", "objectID": "foo"}])
self.transporter.write.assert_called_once_with(
"POST",
"1/indexes/index-name/batch",
{
"requests": [
{
"action": "partialUpdateObjectNoCreate",
"body": {"foo": "bar", "objectID": "foo"},
}
]
},
None,
)
def test_get_objects(self):
request_options = RequestOptions.create(self.config)
requests = [{"indexName": "index-name", "objectID": "foo_id"}]
self.index.get_objects(["foo_id"], request_options)
self.transporter.read.assert_called_once_with(
"POST",
"1/indexes/*/objects",
{"requests": requests}, # asserts version 2 it's used.
request_options,
)
def test_get_objects_with_attributes_to_retreive(self):
request_options = RequestOptions.create(
self.config, {"attributesToRetrieve": ["firstname", "lastname"]}
)
requests = [
{
"indexName": "index-name",
"objectID": "foo_id",
"attributesToRetrieve": ["firstname", "lastname"],
}
]
self.index.get_objects(["foo_id"], request_options)
self.transporter.read.assert_called_once_with(
"POST",
"1/indexes/*/objects",
{"requests": requests}, # asserts version 2 it's used.
request_options,
)
self.assertNotIn("attributesToRetrieve", request_options.data)
def test_get_objects_with_attributes_to_retreive_bulk(self):
request_options = RequestOptions.create(
self.config, {"attributesToRetrieve": ["firstname", "lastname"]}
)
requests = [
{
"indexName": "index-name",
"objectID": "foo_id",
"attributesToRetrieve": ["firstname", "lastname"],
},
{
"indexName": "index-name",
"objectID": "bar_id",
"attributesToRetrieve": ["firstname", "lastname"],
},
]
self.index.get_objects(["foo_id", "bar_id"], request_options)
self.transporter.read.assert_called_once_with(
"POST",
"1/indexes/*/objects",
{"requests": requests}, # asserts version 2 it's used.
request_options,
)
self.assertNotIn("attributesToRetrieve", request_options.data)
def test_get_settings(self):
self.transporter.read.return_value = {
"attributesToIndex": ["attr1", "attr2"],
"numericAttributesToIndex": ["attr1", "attr2"],
"slaves": ["index1", "index2"],
"ignorePlurals": True,
}
request_options = RequestOptions.create(self.config, {"foo": "bar"})
settings = self.index.get_settings(request_options)
self.transporter.read.assert_called_once_with(
"GET", "1/indexes/index-name/settings", None, request_options
)
self.assertEqual(request_options.query_parameters["getVersion"], 2)
self.assertEqual(
settings,
{
"searchableAttributes": ["attr1", "attr2"],
"numericAttributesForFiltering": ["attr1", "attr2"],
"replicas": ["index1", "index2"],
"ignorePlurals": True,
},
)
def test_get_settings_none_as_request_options(self):
self.index.get_settings()
args = self.transporter.read.call_args[0]
self.assertEqual(args[3].query_parameters["getVersion"], 2)
def test_get_settings_dict_as_request_options(self):
self.index.get_settings({"foo": "bar"})
args = self.transporter.read.call_args[0]
self.assertEqual(args[3].query_parameters["getVersion"], 2)
def test_get_settings_with_request_options(self):
request_options = RequestOptions.create(self.config, {"foo": "bar"})
self.index.get_settings(request_options)
args = self.transporter.read.call_args[0]
self.assertEqual(args[3].query_parameters["getVersion"], 2)
def test_save_synonyms(self):
# Test null response
self.index.save_synonyms([]).wait()
# Test object id validation
with self.assertRaises(MissingObjectIdException) as _:
self.index.save_synonyms([F.synonym(object_id=False)])
# Test object id validation
with self.assertRaises(MissingObjectIdException) as _:
self.index.save_synonym(F.synonym(object_id=False))
def test_save_rules(self):
# Test null response
self.index.save_rules([]).wait()
# Test object id validation
with self.assertRaises(MissingObjectIdException) as _:
self.index.save_rule({"foo": "bar"})
# Test object id validation
with self.assertRaises(MissingObjectIdException) as _:
self.index.save_rules([{"foo": "bar"}])
def test_find_object(self):
self.index.search = mock.Mock(name="search")
self.index.search.return_value = {"hits": [{"foo": "bar"}], "nbPages": 1}
self.index.find_object(lambda obj: True)
args, _ = self.index.search.call_args
self.assertEqual(args[0], "")
self.assertEqual(
args[1].data, RequestOptions.create(self.config, {"page": 0}).data
)
self.index.find_object(lambda obj: True, {"query": "foo", "hitsPerPage": 5})
args, _ = self.index.search.call_args
self.assertEqual(args[0], "foo")
self.assertEqual(
args[1].data,
RequestOptions.create(self.config, {"hitsPerPage": 5, "page": 0}).data,
)
self.index.find_object(
lambda obj: True,
RequestOptions.create(self.config, {"User-Agent": "blabla"}),
)
args, _ = self.index.search.call_args
self.assertEqual(args[0], "")
self.assertEqual(
args[1].data, RequestOptions.create(self.config, {"page": 0}).data
)
self.assertEqual(
args[1].headers,
RequestOptions.create(self.config, {"User-Agent": "blabla"}).headers,
)
def test_replace_all_objects(self):
self.index._create_temporary_name = mock.Mock(name="_create_temporary_name")
tmp_index_name = "index-name_tmp_bar"
self.index._create_temporary_name.return_value = tmp_index_name # noqa: E501
obj = F.obj()
self.index.replace_all_objects([obj])
# Asserts the operations of the replace all objects.
self.transporter.write.assert_has_calls(
[
mock.call(
"POST",
"1/indexes/index-name/operation",
{"operation": "copy", "destination": "index-name_tmp_bar"},
{"scope": ["settings", "synonyms", "rules"]},
),
mock.call(
"POST",
"1/indexes/index-name_tmp_bar/batch",
{"requests": [{"action": "updateObject", "body": obj}]},
None,
),
mock.call(
"POST",
"1/indexes/index-name_tmp_bar/operation",
{"operation": "move", "destination": "index-name"},
None,
),
]
)
self.index._transporter.read = mock.Mock(name="read")
self.index._transporter.read.return_value = {"status": "published"}
self.index._transporter.write = mock.Mock(name="write")
self.index._transporter.write.return_value = {"taskID": 1}
self.index.replace_all_objects([obj])
self.assertEqual(self.index._transporter.write.call_count, 3)
self.index.replace_all_objects([obj], {"safe": True})
self.assertEqual(self.index._transporter.write.call_count, 6) # 3+3
self.assertEqual(self.index._transporter.read.call_count, 3) # 3 waits
def test_get_task(self):
with self.assertRaises(AssertionError) as _:
self.index.get_task("")
def test_browse_objects_encode_path(self):
index = SearchIndex(self.transporter, self.config, "#index name_42#%23")
self.transporter.read.return_value = {"hits": [{"foo": "bar"}], "nbPages": 1}
index.browse_objects().next()
self.transporter.read.assert_called_once_with(
"POST",
"1/indexes/%23index%20name_42%23%2523/browse",
{},
None,
)
def test_browse_rules_encode_path(self):
index = SearchIndex(self.transporter, self.config, "#index name_42#%23")
self.transporter.read.return_value = {
"hits": [{"foo": "bar", "_highlightResult": "algolia"}],
"nbPages": 1,
}
index.browse_rules().next()
self.transporter.read.assert_called_once_with(
"POST",
"1/indexes/%23index%20name_42%23%2523/rules/search",
{"hitsPerPage": 1000, "page": 1},
None,
)
def test_browse_synonyms_encode_path(self):
index = SearchIndex(self.transporter, self.config, "#index name_42#%23")
self.transporter.read.return_value = {
"hits": [{"foo": "bar", "_highlightResult": "algolia"}],
"nbPages": 1,
}
index.browse_synonyms().next()
self.transporter.read.assert_called_once_with(
"POST",
"1/indexes/%23index%20name_42%23%2523/synonyms/search",
{"hitsPerPage": 1000, "page": 1},
None,
)
class NullResponse(Response):
def wait(self):
return self
| {
"content_hash": "e27aee795efcda227f576dff75933e83",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 88,
"avg_line_length": 34.87914691943128,
"alnum_prop": 0.563625246280318,
"repo_name": "algolia/algoliasearch-client-python",
"id": "e8cf3b3dffef65ff4f09849d668b1b73367a15c9",
"size": "14719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_search_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "239410"
}
],
"symlink_target": ""
} |
"""PEP 0492/Python 3.5+ tests for binary files."""
import io
from os.path import dirname, join
from aiofiles.threadpool import open as aioopen
import pytest
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb", "rb+", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_iteration(mode, buffering):
"""Test iterating over lines from a file."""
filename = join(dirname(__file__), "..", "resources", "multiline_file.txt")
async with aioopen(filename, mode=mode, buffering=buffering) as file:
# Append mode needs us to seek.
await file.seek(0)
counter = 1
# The old iteration pattern:
while True:
line = await file.readline()
if not line:
break
assert line.strip() == b"line " + str(counter).encode()
counter += 1
counter = 1
await file.seek(0)
# The new iteration pattern:
async for line in file:
assert line.strip() == b"line " + str(counter).encode()
counter += 1
assert file.closed
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb", "rb+", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_readlines(mode, buffering):
"""Test the readlines functionality."""
filename = join(dirname(__file__), "..", "resources", "multiline_file.txt")
with open(filename, mode="rb") as f:
expected = f.readlines()
async with aioopen(str(filename), mode=mode) as file:
# Append mode needs us to seek.
await file.seek(0)
actual = await file.readlines()
assert actual == expected
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb+", "wb", "ab"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_flush(mode, buffering, tmpdir):
"""Test flushing to a file."""
filename = "file.bin"
full_file = tmpdir.join(filename)
if "r" in mode:
full_file.ensure() # Read modes want it to already exist.
async with aioopen(str(full_file), mode=mode, buffering=buffering) as file:
await file.write(b"0") # Shouldn't flush.
if buffering == -1:
assert b"" == full_file.read_binary()
else:
assert b"0" == full_file.read_binary()
await file.flush()
assert b"0" == full_file.read_binary()
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb+", "wb+", "ab+"])
async def test_simple_peek(mode, tmpdir):
"""Test flushing to a file."""
filename = "file.bin"
full_file = tmpdir.join(filename)
full_file.write_binary(b"0123456789")
async with aioopen(str(full_file), mode=mode) as file:
if "a" in mode:
await file.seek(0) # Rewind for append modes.
peeked = await file.peek(1)
# Technically it's OK for the peek to return less bytes than requested.
if peeked:
assert peeked.startswith(b"0")
read = await file.read(1)
assert peeked.startswith(read)
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb", "rb+", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_read(mode, buffering):
"""Just read some bytes from a test file."""
filename = join(dirname(__file__), "..", "resources", "multiline_file.txt")
async with aioopen(filename, mode=mode, buffering=buffering) as file:
await file.seek(0) # Needed for the append mode.
actual = await file.read()
assert b"" == (await file.read())
assert actual == open(filename, mode="rb").read()
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb", "rb+", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_staggered_read(mode, buffering):
"""Read bytes repeatedly."""
filename = join(dirname(__file__), "..", "resources", "multiline_file.txt")
async with aioopen(filename, mode=mode, buffering=buffering) as file:
await file.seek(0) # Needed for the append mode.
actual = []
while True:
byte = await file.read(1)
if byte:
actual.append(byte)
else:
break
assert b"" == (await file.read())
expected = []
with open(filename, mode="rb") as f:
while True:
byte = f.read(1)
if byte:
expected.append(byte)
else:
break
assert actual == expected
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb", "rb+", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_seek(mode, buffering, tmpdir):
"""Test seeking and then reading."""
filename = "bigfile.bin"
content = b"0123456789" * 4 * io.DEFAULT_BUFFER_SIZE
full_file = tmpdir.join(filename)
full_file.write_binary(content)
async with aioopen(str(full_file), mode=mode, buffering=buffering) as file:
await file.seek(4)
assert b"4" == (await file.read(1))
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["wb", "rb", "rb+", "wb+", "ab", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_close_ctx_mgr_iter(mode, buffering, tmpdir):
"""Open a file, read a byte, and close it."""
filename = "bigfile.bin"
content = b"0" * 4 * io.DEFAULT_BUFFER_SIZE
full_file = tmpdir.join(filename)
full_file.write_binary(content)
async with aioopen(str(full_file), mode=mode, buffering=buffering) as file:
assert not file.closed
assert not file._file.closed
assert file.closed
assert file._file.closed
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["wb", "rb", "rb+", "wb+", "ab", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_close_ctx_mgr(mode, buffering, tmpdir):
"""Open a file, read a byte, and close it."""
filename = "bigfile.bin"
content = b"0" * 4 * io.DEFAULT_BUFFER_SIZE
full_file = tmpdir.join(filename)
full_file.write_binary(content)
file = await aioopen(str(full_file), mode=mode, buffering=buffering)
assert not file.closed
assert not file._file.closed
await file.close()
assert file.closed
assert file._file.closed
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb", "rb+", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_readinto(mode, buffering):
"""Test the readinto functionality."""
filename = join(dirname(__file__), "..", "resources", "multiline_file.txt")
async with aioopen(filename, mode=mode, buffering=buffering) as file:
await file.seek(0) # Needed for the append mode.
array = bytearray(4)
bytes_read = await file.readinto(array)
assert bytes_read == 4
assert array == open(filename, mode="rb").read(4)
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb+", "wb", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_truncate(mode, buffering, tmpdir):
"""Test truncating files."""
filename = "bigfile.bin"
content = b"0123456789" * 4 * io.DEFAULT_BUFFER_SIZE
full_file = tmpdir.join(filename)
full_file.write_binary(content)
async with aioopen(str(full_file), mode=mode, buffering=buffering) as file:
# The append modes want us to seek first.
await file.seek(0)
if "w" in mode:
# We've just erased the entire file.
await file.write(content)
await file.flush()
await file.seek(0)
await file.truncate()
assert b"" == full_file.read_binary()
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["wb", "rb+", "wb+", "ab", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_simple_write(mode, buffering, tmpdir):
"""Test writing into a file."""
filename = "bigfile.bin"
content = b"0" * 4 * io.DEFAULT_BUFFER_SIZE
full_file = tmpdir.join(filename)
if "r" in mode:
full_file.ensure() # Read modes want it to already exist.
async with aioopen(str(full_file), mode=mode, buffering=buffering) as file:
bytes_written = await file.write(content)
assert bytes_written == len(content)
assert content == full_file.read_binary()
@pytest.mark.asyncio
async def test_simple_detach(tmpdir):
"""Test detaching for buffered streams."""
filename = "file.bin"
full_file = tmpdir.join(filename)
full_file.write_binary(b"0123456789")
with pytest.raises(ValueError):
async with aioopen(str(full_file), mode="rb") as file:
raw_file = file.detach()
assert raw_file
with pytest.raises(ValueError):
await file.read()
assert b"0123456789" == raw_file.read(10)
@pytest.mark.asyncio
async def test_simple_readall(tmpdir):
"""Test the readall function by reading a large file in.
Only RawIOBase supports readall().
"""
filename = "bigfile.bin"
content = b"0" * 4 * io.DEFAULT_BUFFER_SIZE # Hopefully several reads.
sync_file = tmpdir.join(filename)
sync_file.write_binary(content)
file = await aioopen(str(sync_file), mode="rb", buffering=0)
actual = await file.readall()
assert actual == content
await file.close()
assert file.closed
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb", "rb+", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_name_property(mode, buffering):
"""Test iterating over lines from a file."""
filename = join(dirname(__file__), "..", "resources", "multiline_file.txt")
async with aioopen(filename, mode=mode, buffering=buffering) as file:
assert file.name == filename
assert file.closed
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["rb", "rb+", "ab+"])
@pytest.mark.parametrize("buffering", [-1, 0])
async def test_mode_property(mode, buffering):
"""Test iterating over lines from a file."""
filename = join(dirname(__file__), "..", "resources", "multiline_file.txt")
async with aioopen(filename, mode=mode, buffering=buffering) as file:
assert file.mode == mode
assert file.closed
| {
"content_hash": "e50f9e52467b0532ffed4d103a48c379",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 79,
"avg_line_length": 30.446428571428573,
"alnum_prop": 0.6218963831867058,
"repo_name": "Tinche/aiofiles",
"id": "faf72a09360160aee28460c46145d99f71a44f5d",
"size": "10230",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/threadpool/test_binary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "52"
},
{
"name": "Python",
"bytes": "60965"
}
],
"symlink_target": ""
} |
import numpy as np
from utils import gen_tpm_inputs, gen_ppm_inputs
class PermutationParityTrainer(object):
"""Permutation Parity Trainer
    Used to train two Permutation Parity Machines (PPM) to sync their weights, optionally involving a third snooper model.
"""
def __init__(self):
pass
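    # Illustrative usage (a sketch; the PermutationParityMachine constructor
    # signature shown here is assumed, not defined in this module):
    #
    #   alice = PermutationParityMachine(K, N, L)
    #   bob = PermutationParityMachine(K, N, L)
    #   eve = PermutationParityMachine(K, N, L)   # passive attacker
    #   trainer = PermutationParityTrainer()
    #   trained, n_iter, progress = trainer.train(alice, bob, eve, total_iter=10 ** 5)
    #   shared_key = alice.get_key()   # equals bob.get_key() once trained is True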
def train_step(self, model_a, model_b):
"""Updates model a and model b if they have the same tau given the random input.
Args:
model_a (PermutationParityMachine): Model used to sync weights with Model B.
model_b (PermutationParityMachine): Model used to sync weights with Model A.
Returns:
(bool). If True, then Model A and B updated.
"""
if not self.hyperparams_same(model_a, model_b):
raise ValueError("Models are incompatible. Need same hyper-parameters (K, N, L).")
K, N, L = model_a.get_hyper_params()
x = gen_ppm_inputs(K, N)
tau_a = model_a.output(x)
tau_b = model_b.output(x)
if tau_a == tau_b:
model_a.update(tau_b)
model_b.update(tau_a)
return tau_a == tau_b
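    # NOTE: Python does not overload by signature, so the three-model variant
    # defined below replaces this two-model train_step at runtime.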
def train_step(self, model_a, model_b, eve):
"""Updates model a and model b if they have the same tau given the random input.
Will also update the snooper model, eve, if matches with model a and model b.
Args:
model_a (PermutationParityMachine): Model used to sync weights with Model B.
model_b (PermutationParityMachine): Model used to sync weights with Model A.
eve (PermutationParityMachine): Snooper model attempting to sync with Model A and B.
Returns:
(tuple). If first element is True, then Model A and B updated.
If the second element is True then Eve also updated.
"""
if not self.hyperparams_same(model_a, model_b) or not self.hyperparams_same(model_a, eve):
raise ValueError("Models are incompatible. Need same hyper-parameters (K, N).")
K, N, L = model_a.get_hyper_params()
x = gen_ppm_inputs(K, N)
tau_a = model_a.output(x)
tau_b = model_b.output(x)
tau_eve = eve.output(x)
if tau_a == tau_b:
model_a.update(tau_b)
model_b.update(tau_a)
if tau_a == tau_eve:
eve.update(tau_a)
return (tau_a == tau_b), (tau_a == tau_b) and (tau_a == tau_eve)
def train(self, model_a, model_b, total_iter=np.infty, print_step=None):
"""Runs through several training steps with model A and B, attempting to have a closer match.
Args:
model_a (PermutationParityMachine): Model used to sync weights with Model B.
model_b (PermutationParityMachine): Model used to sync weights with Model A.
total_iter (int): The total number of training steps to run or until matching.
Can be set to np.infty, which will run the training until Model A matches B.
(Optional: np.infty)
print_step (int): Prints training information every print_step. If None, won't print anything.
(i.e. print_step = 100 would print information every 100 steps)
(Optional: None)
Returns:
(list).
trained (bool): If True, Model A and B are synced.
n_iter (int): Number iterations it took to train Model A and B.
progress (list): Consists of the matching percentage between Model A and B each iteration.
"""
trained = False
n_iter = 0
progress = []
        while n_iter < total_iter:
progress.append(np.equal(model_a.get_key() == model_b.get_key(), True).sum() *
1. / np.prod(model_a.get_key().shape))
if np.array_equal(model_a.weights, model_b.weights):
trained = True
break
if print_step is not None and ((n_iter + 1) % print_step) == 0:
print "Step:", n_iter + 1
print "Percent Match (A & B):", progress[-1][0]
print ""
self.train_step(model_a, model_b)
n_iter += 1
return [trained, n_iter, progress]
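    # NOTE: as with train_step, the three-model train defined below replaces
    # this two-model definition at runtime.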
def train(self, model_a, model_b, eve, total_iter=np.infty, print_step=None):
"""Runs through several training steps with model A and B, attempting to have a closer match.
Args:
model_a (PermutationParityMachine): Model used to sync weights with Model B.
model_b (PermutationParityMachine): Model used to sync weights with Model A.
eve (PermutationParityMachine): Model used to snoop on Models A and B.
total_iter (int): The total number of training steps to run or until matching.
Can be set to np.infty, which will run the training until Model A matches B.
(Optional: np.infty)
print_step (int): Prints training information every print_step. If None, won't print anything.
(i.e. print_step = 100 would print information every 100 steps)
(Optional: None)
Returns:
(list).
trained (bool): If True, Model A and B are synced.
n_iter (int): Number iterations it took to train Model A and B.
progress (list): Consists of the matching percentage between Model A, B, and Eve each iteration.
"""
trained = False
n_iter = 0
progress = []
        while n_iter < total_iter:
progress.append([np.equal(model_a.get_key(), model_b.get_key()).sum() *
1. / np.prod(model_a.get_key().shape),
np.equal(model_a.get_key(), eve.get_key()).sum() *
1. / np.prod(model_a.get_key().shape),
np.equal(model_b.get_key(), eve.get_key()).sum() *
1. / np.prod(model_a.get_key().shape)])
if np.array_equal(model_a.weights, model_b.weights):
trained = True
break
if print_step is not None and ((n_iter + 1) % print_step) == 0:
print "Step:", n_iter + 1
print "Percent Match (A & B):", progress[-1][0]
print "Percent Match (A & Eve):", progress[-1][1]
print "Percent Match (B & Eve):", progress[-1][2]
print ""
self.train_step(model_a, model_b, eve)
n_iter += 1
return [trained, n_iter, progress]
@staticmethod
def hyperparams_same(model_a, model_b):
"""Confirms that two models have the same hyper-parameters for their models.
Args:
model_a (PermutationParityMachine): Compared with model_b.
model_b (PermutationParityMachine): Compared with model_a.
Returns:
Boolean. True if the hyper-parameters are the same, False otherwise.
"""
if model_a.get_hyper_params() == model_b.get_hyper_params():
return True
return False
class TreeParityTrainer(object):
"""Tree Parity Trainer
    Used to train two Tree Parity Machines (TPM) to sync their weights, optionally involving a third snooper model.
"""
def __init__(self):
pass
def train_step(self, model_a, model_b):
"""Updates model a and model b if they have the same tau given the random input.
Args:
model_a (TreeParityMachine): Model used to sync weights with Model B.
model_b (TreeParityMachine): Model used to sync weights with Model A.
Returns:
(bool). If True, then Model A and B updated.
"""
if not self.hyperparams_same(model_a, model_b):
raise ValueError("Models are incompatible. Need same hyper-parameters (K, N, L).")
x = gen_tpm_inputs(model_a.k, model_a.n)
tau_a = model_a.output(x)
tau_b = model_b.output(x)
if tau_a == tau_b:
model_a.update(tau_b)
model_b.update(tau_a)
return tau_a == tau_b
def train_step(self, model_a, model_b, eve):
"""Updates model a and model b if they have the same tau given the random input.
Will also update the snooper model, eve, if matches with model a and model b.
Args:
model_a (TreeParityMachine): Model used to sync weights with Model B.
model_b (TreeParityMachine): Model used to sync weights with Model A.
eve (TreeParityMachine): Snooper model attempting to sync with Model A and B.
Returns:
(tuple). If first element is True, then Model A and B updated.
If the second element is True then Eve also updated.
"""
if not self.hyperparams_same(model_a, model_b) or not self.hyperparams_same(model_a, eve):
raise ValueError("Models are incompatible. Need same hyper-parameters (K, N, L).")
K, N, L = model_a.get_hyper_params()
x = gen_tpm_inputs(K, N)
tau_a = model_a.output(x)
tau_b = model_b.output(x)
tau_eve = eve.output(x)
if tau_a == tau_b:
model_a.update(tau_b)
model_b.update(tau_a)
if tau_a == tau_eve:
eve.update(tau_a)
return (tau_a == tau_b), (tau_a == tau_b) and (tau_a == tau_eve)
def train(self, model_a, model_b, total_iter=np.infty, print_step=None):
"""Runs through several training steps with model A and B, attempting to have a closer match.
Args:
model_a (TreeParityMachine): Model used to sync weights with Model B.
model_b (TreeParityMachine): Model used to sync weights with Model A.
total_iter (int): The total number of training steps to run or until matching.
Can be set to np.infty, which will run the training until Model A matches B.
(Optional: np.infty)
print_step (int): Prints training information every print_step. If None, won't print anything.
(i.e. print_step = 100 would print information every 100 steps)
(Optional: None)
Returns:
(list).
trained (bool): If True, Model A and B are synced.
            n_iter (int): Number of iterations it took to train Model A and B.
progress (list): Consists of the matching percentage between Model A and B each iteration.
"""
trained = False
n_iter = 0
progress = []
        while n_iter < total_iter and not trained:
            progress.append(np.equal(model_a.get_key(), model_b.get_key()).sum() *
                            1. / np.prod(model_a.get_key().shape))
if np.array_equal(model_a.get_key(), model_b.get_key()):
trained = True
break
if print_step is not None and ((n_iter + 1) % print_step) == 0:
print "Step:", n_iter + 1
print "Percent Match (A & B):", progress[-1][0]
print "Percent Match (A & Eve):", progress[-1][1]
print "Percent Match (B & Eve):", progress[-1][2]
print ""
self.train_step(model_a, model_b)
n_iter += 1
return [trained, n_iter, progress]
def train(self, model_a, model_b, eve, total_iter=np.infty, print_step=None):
"""Runs through several training steps with model A and B, attempting to have a closer match.
Args:
model_a (TreeParityMachine): Model used to sync weights with Model B.
model_b (TreeParityMachine): Model used to sync weights with Model A.
eve (TreeParityMachine): Snooper model attempting to sync with Model A and B.
total_iter (int): The total number of training steps to run or until matching.
Can be set to np.infty, which will run the training until Model A matches B.
(Optional: np.infty)
print_step (int): Prints training information every print_step. If None, won't print anything.
(i.e. print_step = 100 would print information every 100 steps)
(Optional: None)
Returns:
(list).
trained (bool): If True, Model A and B are synced.
            n_iter (int): Number of iterations it took to train Model A and B.
progress (list): Consists of the matching percentage between Model A, B, and Eve each iteration.
"""
trained = False
n_iter = 0
progress = []
        while n_iter < total_iter and not trained:
progress.append([np.equal(model_a.get_key(), model_b.get_key()).sum() *
1. / np.prod(model_a.get_key().shape),
np.equal(model_a.get_key(), eve.get_key()).sum() *
1. / np.prod(model_a.get_key().shape),
np.equal(model_b.get_key(), eve.get_key()).sum() *
1. / np.prod(model_a.get_key().shape)])
if np.array_equal(model_a.get_key(), model_b.get_key()):
trained = True
break
if print_step is not None and ((n_iter + 1) % print_step) == 0:
print "Step:", n_iter + 1
print "Percent Match (A & B):", progress[-1][0]
print "Percent Match (A & Eve):", progress[-1][1]
print "Percent Match (B & Eve):", progress[-1][2]
print ""
self.train_step(model_a, model_b, eve)
n_iter += 1
return [trained, n_iter, progress]
@staticmethod
def hyperparams_same(model_a, model_b):
"""Confirms that two models have the same hyper-parameters for their models.
Args:
model_a (TreeParityMachine): Compared with model_b.
model_b (TreeParityMachine): Compared with model_a.
Returns:
Boolean. True if the hyper-parameters are the same, False otherwise.
"""
if model_a.get_hyper_params() == model_b.get_hyper_params():
return True
return False
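if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module. It assumes a
    # TreeParityMachine class whose constructor takes the hyper-parameters
    # (K, N, L) in that order; that signature and the import path are inferred
    # from get_hyper_params()/gen_tpm_inputs(K, N) above and may differ from
    # the real implementation.
    from parity_machine import TreeParityMachine  # hypothetical import path
    alice = TreeParityMachine(3, 4, 6)
    bob = TreeParityMachine(3, 4, 6)
    trainer = TreeParityTrainer()
    trained, n_iter, progress = trainer.train(alice, bob,
                                              total_iter=100000, print_step=1000)
    if trained:
        # Once the weights are synced, either party's key is the shared secret.
        shared_key = alice.get_key()
        print "Synced after", n_iter, "steps"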
| {
"content_hash": "5b267a649674b42558124e4b4e5a4d59",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 116,
"avg_line_length": 42.166666666666664,
"alnum_prop": 0.5662761151891587,
"repo_name": "johnsbuck/neural_cryptography",
"id": "44c47e9b0b4fe2faff328d27fbb98c5becf12230",
"size": "14168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parity_machine/trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39016"
}
],
"symlink_target": ""
} |
import mock
from openstack.tests.unit import base
from openstack.clustering.v1 import cluster
FAKE_ID = '092d0955-2645-461a-b8fa-6a44655cdb2c'
FAKE_NAME = 'test_cluster'
FAKE = {
'id': 'IDENTIFIER',
'config': {'key1': 'value1', 'key2': 'value2'},
'desired_capacity': 1,
'max_size': 3,
'min_size': 0,
'name': FAKE_NAME,
'profile_id': 'myserver',
'profile_only': True,
'metadata': {},
'dependents': {},
'timeout': None,
'init_at': '2015-10-10T12:46:36.000000',
'created_at': '2015-10-10T12:46:36.000000',
'updated_at': '2016-10-10T12:46:36.000000',
}
FAKE_CREATE_RESP = {
'cluster': {
'action': 'a679c926-908f-49e7-a822-06ca371e64e1',
'init_at': '2015-10-10T12:46:36.000000',
'created_at': '2015-10-10T12:46:36.000000',
'updated_at': '2016-10-10T12:46:36.000000',
'data': {},
'desired_capacity': 1,
'domain': None,
'id': FAKE_ID,
'init_time': None,
'max_size': 3,
'metadata': {},
'min_size': 0,
'name': 'test_cluster',
'nodes': [],
'policies': [],
'profile_id': '560a8f9d-7596-4a32-85e8-03645fa7be13',
'profile_name': 'myserver',
'project': '333acb15a43242f4a609a27cb097a8f2',
'status': 'INIT',
'status_reason': 'Initializing',
'timeout': None,
'user': '6d600911ff764e54b309ce734c89595e',
'dependents': {},
}
}
class TestCluster(base.TestCase):
def setUp(self):
super(TestCluster, self).setUp()
def test_basic(self):
sot = cluster.Cluster()
self.assertEqual('cluster', sot.resource_key)
self.assertEqual('clusters', sot.resources_key)
self.assertEqual('/clusters', sot.base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_commit)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_instantiate(self):
sot = cluster.Cluster(**FAKE)
self.assertEqual(FAKE['id'], sot.id)
self.assertEqual(FAKE['name'], sot.name)
self.assertEqual(FAKE['profile_id'], sot.profile_id)
self.assertEqual(FAKE['min_size'], sot.min_size)
self.assertEqual(FAKE['max_size'], sot.max_size)
self.assertEqual(FAKE['desired_capacity'], sot.desired_capacity)
self.assertEqual(FAKE['config'], sot.config)
self.assertEqual(FAKE['timeout'], sot.timeout)
self.assertEqual(FAKE['metadata'], sot.metadata)
self.assertEqual(FAKE['init_at'], sot.init_at)
self.assertEqual(FAKE['created_at'], sot.created_at)
self.assertEqual(FAKE['updated_at'], sot.updated_at)
self.assertEqual(FAKE['dependents'], sot.dependents)
self.assertTrue(sot.is_profile_only)
self.assertDictEqual({"limit": "limit",
"marker": "marker",
"name": "name",
"status": "status",
"sort": "sort",
"global_project": "global_project"},
sot._query_mapping._mapping)
def test_scale_in(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.scale_in(sess, 3))
url = 'clusters/%s/actions' % sot.id
body = {'scale_in': {'count': 3}}
sess.post.assert_called_once_with(url,
json=body)
def test_scale_out(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.scale_out(sess, 3))
url = 'clusters/%s/actions' % sot.id
body = {'scale_out': {'count': 3}}
sess.post.assert_called_once_with(url,
json=body)
def test_resize(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.resize(sess, foo='bar', zoo=5))
url = 'clusters/%s/actions' % sot.id
body = {'resize': {'foo': 'bar', 'zoo': 5}}
sess.post.assert_called_once_with(url,
json=body)
def test_add_nodes(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.add_nodes(sess, ['node-33']))
url = 'clusters/%s/actions' % sot.id
body = {'add_nodes': {'nodes': ['node-33']}}
sess.post.assert_called_once_with(url,
json=body)
def test_del_nodes(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.del_nodes(sess, ['node-11']))
url = 'clusters/%s/actions' % sot.id
body = {'del_nodes': {'nodes': ['node-11']}}
sess.post.assert_called_once_with(url,
json=body)
def test_del_nodes_with_params(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
params = {
'destroy_after_deletion': True,
}
self.assertEqual('', sot.del_nodes(sess, ['node-11'], **params))
url = 'clusters/%s/actions' % sot.id
body = {
'del_nodes': {
'nodes': ['node-11'],
'destroy_after_deletion': True,
}
}
sess.post.assert_called_once_with(url,
json=body)
def test_replace_nodes(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.replace_nodes(sess, {'node-22': 'node-44'}))
url = 'clusters/%s/actions' % sot.id
body = {'replace_nodes': {'nodes': {'node-22': 'node-44'}}}
sess.post.assert_called_once_with(url,
json=body)
def test_policy_attach(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
params = {
'enabled': True,
}
self.assertEqual('', sot.policy_attach(sess, 'POLICY', **params))
url = 'clusters/%s/actions' % sot.id
body = {
'policy_attach': {
'policy_id': 'POLICY',
'enabled': True,
}
}
sess.post.assert_called_once_with(url,
json=body)
def test_policy_detach(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.policy_detach(sess, 'POLICY'))
url = 'clusters/%s/actions' % sot.id
body = {'policy_detach': {'policy_id': 'POLICY'}}
sess.post.assert_called_once_with(url,
json=body)
def test_policy_update(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
params = {
'enabled': False
}
self.assertEqual('', sot.policy_update(sess, 'POLICY', **params))
url = 'clusters/%s/actions' % sot.id
body = {
'policy_update': {
'policy_id': 'POLICY',
'enabled': False
}
}
sess.post.assert_called_once_with(url,
json=body)
def test_check(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.check(sess))
url = 'clusters/%s/actions' % sot.id
body = {'check': {}}
sess.post.assert_called_once_with(url,
json=body)
def test_recover(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.recover(sess))
url = 'clusters/%s/actions' % sot.id
body = {'recover': {}}
sess.post.assert_called_once_with(url,
json=body)
def test_operation(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.post = mock.Mock(return_value=resp)
self.assertEqual('', sot.op(sess, 'dance', style='tango'))
url = 'clusters/%s/ops' % sot.id
body = {'dance': {'style': 'tango'}}
sess.post.assert_called_once_with(url,
json=body)
def test_force_delete(self):
sot = cluster.Cluster(**FAKE)
resp = mock.Mock()
fake_action_id = 'f1de9847-2382-4272-8e73-cab0bc194663'
resp.headers = {'Location': fake_action_id}
resp.json = mock.Mock(return_value={"foo": "bar"})
resp.status_code = 200
sess = mock.Mock()
sess.delete = mock.Mock(return_value=resp)
res = sot.force_delete(sess)
self.assertEqual(fake_action_id, res.id)
url = 'clusters/%s' % sot.id
body = {'force': True}
sess.delete.assert_called_once_with(url, json=body)
| {
"content_hash": "f0013b43518de1d0dead0d2e1202f9e4",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 77,
"avg_line_length": 33.57188498402556,
"alnum_prop": 0.5215074229158736,
"repo_name": "openstack/python-openstacksdk",
"id": "ac6eab76f9f6e82ef64fef8678be639760eac777",
"size": "11054",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/clustering/v1/test_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3804005"
},
{
"name": "Shell",
"bytes": "9027"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
# noinspection PyUnresolvedReferences
from h2o.utils.compatibility import * # NOQA
import h2o
from h2o.utils.ext_dependencies import get_matplotlib_pyplot
from h2o.utils.shared_utils import can_use_pandas
from .model_base import ModelBase
from .metrics_base import * # NOQA
class H2ODimReductionModel(ModelBase):
"""
Dimension reduction model, such as PCA or GLRM.
"""
def varimp(self, use_pandas=False):
"""
        Return the importance of components associated with a PCA model.
:param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame. (Default is False.)
"""
model = self._model_json["output"]
if "importance" in list(model.keys()) and model["importance"]:
vals = model["importance"].cell_values
header = model["importance"].col_header
if use_pandas and can_use_pandas():
import pandas
return pandas.DataFrame(vals, columns=header)
else:
return vals
else:
print("Warning: This model doesn't have importances of components.")
def num_iterations(self):
"""Get the number of iterations that it took to converge or reach max iterations."""
o = self._model_json["output"]
return o["model_summary"]["number_of_iterations"][0]
def objective(self):
"""Get the final value of the objective function."""
o = self._model_json["output"]
return o["model_summary"]["final_objective_value"][0]
def final_step(self):
"""Get the final step size for the model."""
o = self._model_json["output"]
return o["model_summary"]["final_step_size"][0]
def archetypes(self):
"""The archetypes (Y) of the GLRM model."""
o = self._model_json["output"]
yvals = o["archetypes"].cell_values
archetypes = []
for yidx, yval in enumerate(yvals):
archetypes.append(list(yvals[yidx])[1:])
return archetypes
def reconstruct(self, test_data, reverse_transform=False):
"""
Reconstruct the training data from the model and impute all missing values.
:param H2OFrame test_data: The dataset upon which the model was trained.
:param bool reverse_transform: Whether the transformation of the training data during model-building
should be reversed on the reconstructed frame.
:returns: the approximate reconstruction of the training data.
"""
if test_data is None or test_data.nrow == 0: raise ValueError("Must specify test data")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"reconstruct_train": True, "reverse_transform": reverse_transform})
return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
def proj_archetypes(self, test_data, reverse_transform=False):
"""
Convert archetypes of the model into original feature space.
:param H2OFrame test_data: The dataset upon which the model was trained.
:param bool reverse_transform: Whether the transformation of the training data during model-building
should be reversed on the projected archetypes.
:returns: model archetypes projected back into the original training data's feature space.
"""
if test_data is None or test_data.nrow == 0: raise ValueError("Must specify test data")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"project_archetypes": True, "reverse_transform": reverse_transform})
return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
def screeplot(self, type="barplot", server=False):
"""
Produce the scree plot.
Library ``matplotlib`` is required for this function.
:param str type: either ``"barplot"`` or ``"lines"``.
:param bool server: if true set server settings to matplotlib and do not show the graph
"""
# check for matplotlib. exit if absent.
plt = get_matplotlib_pyplot(server)
if plt is None:
return
variances = [s ** 2 for s in self._model_json['output']['importance'].cell_values[0][1:]]
plt.xlabel('Components')
plt.ylabel('Variances')
plt.title('Scree Plot')
plt.xticks(list(range(1, len(variances) + 1)))
if type == "barplot":
plt.bar(list(range(1, len(variances) + 1)), variances)
elif type == "lines":
plt.plot(list(range(1, len(variances) + 1)), variances, 'b--')
if not server:
plt.show()
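if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: train a PCA model and
    # inspect it through the helpers defined above. The estimator import path, the
    # dataset URL and the column selection are assumptions; adjust to your setup.
    import h2o
    from h2o.estimators import H2OPrincipalComponentAnalysisEstimator  # assumed path
    h2o.init()
    iris = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")  # assumed URL
    pca = H2OPrincipalComponentAnalysisEstimator(k=3)
    pca.train(x=iris.names[:4], training_frame=iris)
    print(pca.varimp(use_pandas=True))  # importance of components
    print(pca.num_iterations())
    pca.screeplot(type="lines", server=True)  # server=True suppresses the plot window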
| {
"content_hash": "de36cd03edc9642ddd40450f45db13c7",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 131,
"avg_line_length": 39.959016393442624,
"alnum_prop": 0.6285128205128205,
"repo_name": "michalkurka/h2o-3",
"id": "04b5a37f09742ae6993f6bb5fecf57e4f782efd2",
"size": "4901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/h2o/model/dim_reduction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "231770"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "Dockerfile",
"bytes": "10302"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "166480"
},
{
"name": "HCL",
"bytes": "15007"
},
{
"name": "HTML",
"bytes": "251906"
},
{
"name": "HiveQL",
"bytes": "3965"
},
{
"name": "Java",
"bytes": "11932863"
},
{
"name": "JavaScript",
"bytes": "89484"
},
{
"name": "Jupyter Notebook",
"bytes": "13867219"
},
{
"name": "Makefile",
"bytes": "50635"
},
{
"name": "Python",
"bytes": "6801044"
},
{
"name": "R",
"bytes": "3223113"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "33647"
},
{
"name": "Shell",
"bytes": "186559"
},
{
"name": "TeX",
"bytes": "634412"
}
],
"symlink_target": ""
} |
"""Circulation JSON Resolver module."""
| {
"content_hash": "49f975ccd54ce4237d0b532f4f7b188a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.725,
"repo_name": "inveniosoftware/invenio-circulation",
"id": "1f7d1eb5f9d1808151eff7e76facdc9ca17b6299",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_circulation/records/jsonresolver/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "174897"
},
{
"name": "Shell",
"bytes": "1051"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.contrib.formtools.preview import FormPreview
from django.contrib import messages
from django.contrib.auth import login, authenticate
from selvbetjening.core.user.models import SUser
from selvbetjening.businesslogic.members.forms import UserRegistrationForm, ProfileEditForm, UserWebsiteFormSet
from selvbetjening.frontend.userportal.forms import ChangePasswordForm, ChangePictureForm, \
PrivacyForm, ChangeUsernameForm
from selvbetjening.frontend.userportal.processor_handlers import profile_page_processors
from selvbetjening.frontend.userportal.models import UserPrivacy
def profile_redirect(request):
if isinstance(request.user, AnonymousUser):
return HttpResponseRedirect(reverse('members_login'))
else:
return HttpResponseRedirect(reverse('members_profile'))
@login_required
def public_profile_page(request,
username,
template_name='userportal/public_profile.html',
template_no_access_name='userportal/profile_no_access.html'):
user = get_object_or_404(SUser, username=username)
privacy, created = UserPrivacy.objects.get_or_create(user=user)
own_profile = False
if privacy.public_profile:
handler = profile_page_processors.get_handler(request, user)
add_to_profile = handler.view(own_profile)
return render(request,
template_name,
{
'viewed_user': user,
'privacy': privacy,
'add_to_profile': add_to_profile
})
else:
return render(request,
template_no_access_name,
{
'username': user.username
})
@login_required
def profile(request,
template_name='userportal/profile.html'):
user = request.user
privacy = UserPrivacy.full_access()
own_profile = True
own_privacy, created = UserPrivacy.objects.get_or_create(user=user)
handler = profile_page_processors.get_handler(request, user)
add_to_profile = handler.view(own_profile)
return render(request,
template_name,
{
'viewed_user': user,
'privacy': privacy,
'own_privacy': own_privacy,
'add_to_profile': add_to_profile
})
@login_required
def edit_profile(request,
template_name='userportal/edit_profile.html',
success_page='userportal_profile',
form_class=ProfileEditForm):
user = request.user
if request.method == 'POST':
form = form_class(request.POST, instance=user)
website_form = UserWebsiteFormSet(request.POST, instance=user)
if form.is_valid() and website_form.is_valid():
form.save()
website_form.save()
messages.success(request, _(u'Personal information updated'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class(instance=user)
website_form = UserWebsiteFormSet(instance=user)
return render(request,
template_name,
{
'form': form,
'website_form': website_form
})
@login_required
def edit_privacy(request,
form_class=PrivacyForm,
template_name='userportal/edit_privacy.html',
success_page='userportal_profile'):
privacy, created = UserPrivacy.objects.get_or_create(user=request.user)
if request.method == 'POST':
form = form_class(request.POST, instance=privacy)
        if form.is_valid():
form.save()
messages.success(request, _(u'Privacy settings updated'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class(instance=privacy)
return render(request,
template_name,
{
'form': form
})
@login_required
def edit_picture(request,
form_class=ChangePictureForm,
success_page='userportal_profile',
template_name='userportal/edit_picture.html'):
profile = request.user
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
profile.picture = form.cleaned_data['picture']
profile.save()
messages.success(request, _(u'Profile picture changed'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class()
return render(request,
template_name,
{
'form': form,
'user': profile
})
@login_required
def edit_password(request,
template_name='userportal/edit_password.html',
post_change_redirect='userportal_profile',
change_password_form=ChangePasswordForm):
if request.method == 'POST':
form = change_password_form(request.user, request.POST)
if form.is_valid():
form.save()
messages.success(request, _(u'Password changed'))
return HttpResponseRedirect(reverse(post_change_redirect))
else:
form = change_password_form(request.user)
return render(request,
template_name,
{
'form': form,
})
class UsernameChangeView(FormPreview):
preview_template = 'userportal/edit_username_confirmed.html'
form_template = 'userportal/edit_username.html'
def __call__(self, request, *args, **kwargs):
return super(UsernameChangeView, self).__call__(request, *args, **kwargs)
def process_preview(self, request, form, context):
context['new_username'] = form.cleaned_data['new_username']
def done(self, request, cleaned_data):
request.user.username = cleaned_data['new_username']
request.user.save()
messages.success(request, _(u'Username changed'))
return HttpResponseRedirect(reverse('userportal_profile'))
edit_username = login_required(UsernameChangeView(ChangeUsernameForm))
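# Illustrative wiring sketch, not part of this module, for the register view
# defined below: success_page accepts either a reversible URL name or a
# callable taking (request, user). The URL pattern, names and callable used
# here are assumptions.
#
#   from django.conf.urls import url
#   from selvbetjening.frontend.userportal import views
#
#   def after_registration(request, user):
#       return reverse('userportal_profile')
#
#   urlpatterns = [
#       url(r'^register/$', views.register,
#           {'success_page': after_registration, 'login_on_success': True},
#           name='userportal_register'),
#   ]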
def register(request,
success_page,
form_class=UserRegistrationForm,
login_on_success=False,
template_name='userportal/registration.html'):
""" Allows a new user to register an account.
    success_page -- a reversible view name or a function returning
                    a URL. The function takes a request and a user
object as input.
"""
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid():
user = form.save()
if login_on_success:
user = authenticate(username=user.username, password=request.POST['password'])
login(request, user)
if callable(success_page):
return HttpResponseRedirect(success_page(request, user))
else:
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class()
return render(request,
template_name,
{
'form': form
}) | {
"content_hash": "f0344bbb0ed5e6bf5216313727597c77",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 111,
"avg_line_length": 31.649193548387096,
"alnum_prop": 0.5951076570263728,
"repo_name": "animekita/selvbetjening",
"id": "b014f0aa6007ec77384c1c188cc276908f2aba5c",
"size": "7865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selvbetjening/frontend/userportal/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17368"
},
{
"name": "HTML",
"bytes": "107945"
},
{
"name": "JavaScript",
"bytes": "4914"
},
{
"name": "Python",
"bytes": "881254"
},
{
"name": "Shell",
"bytes": "312"
}
],
"symlink_target": ""
} |
"""This script is to automate the process of install modules
Blender's internal version of Python
How to use it:
1. Open Blender with Admin permissions if Blender is
installed in the default location (at least with windows)
2. Open the scripting scene
3. Open the script in the "Text Editor" window
(Optionally click "Window" in the top left and then "Toggle System Console")
4. Push the play button to run the script
5. Wait despite it looking frozen
(You can look at the system console if you opened it to comfort yourself)
"""
#WARNING THE MACOS AND LINUX FUNCTIONS ARE NOT CURRENTLY CODED
#if you are working on one of those OS's
#I trust you can follow my windows pleb example and run the commands for your own OS
dependencies = ["numpy", "pandas", "libneuroml", "xlrd", "matplotlib"]
#if you get an import error on the 3rd party module, add it to this list
import os
import sys
import subprocess
#add NeuronBlenderImaging folder to Blender Python path
blendFileDir = os.path.dirname(os.path.realpath(__file__))
blenderWorkingPath = os.getcwd()
scriptPath = os.path.abspath(os.path.join(blendFileDir, "..", "..", "NeuronBlenderImaging"))
print("""
==============================PATH LOCATIONS==============================
blendFileDir %s
blenderWorkingPath %s
scriptPath %s\n\n""" % (blendFileDir, blenderWorkingPath, scriptPath))
sys.path.insert(0, scriptPath)
def install_4win():
for package in dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
def install_4linux():
#use the subprocess module to send commands to your console using bash
#or whatever you linux peeps use
#for package in dependencies:
#subprocess.check_call([#enter in your commands here!!! package])
pass
def install_4macos():
#use the subprocess module to send commands to your console using zsh
#or whatever you macos peeps use
#for package in dependencies:
#subprocess.check_call([#enter in your commands here!!! package])
pass
def main():
#sys.platform names for various os's
win_systems = ["win32", "cygwin", "msys"]
macos_systems = ["darwin", "os2", "os2emx"]
linux_systems = ["linux", "linux2"]
if sys.platform in win_systems:
install_4win()
elif sys.platform in linux_systems:
install_4linux()
elif sys.platform in macos_systems:
install_4macos()
if __name__ == "__main__":
main() | {
"content_hash": "640951fe25a7d27e8aa427824d16e555",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 92,
"avg_line_length": 33.397260273972606,
"alnum_prop": 0.6936013125512716,
"repo_name": "openworm/Blender2NeuroML",
"id": "56e665b6b237c6fce8f150d9770c28f35e9946c0",
"size": "2438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/NeuronBlenderImaging/installDependencies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148611"
}
],
"symlink_target": ""
} |
import vtk
def main():
colors = vtk.vtkNamedColors()
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(12)
sphere.SetThetaResolution(12)
colorIt = vtk.vtkElevationFilter()
colorIt.SetInputConnection(sphere.GetOutputPort())
colorIt.SetLowPoint(0, 0, -1)
colorIt.SetHighPoint(0, 0, 1)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(colorIt.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("SlateGray"))
renWin.SetSize(640, 480)
renWin.Render()
# Interact with the data.
iren.Start()
if __name__ == '__main__':
main()
| {
"content_hash": "87d41ccd946f582e38af88098ed5d126",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 58,
"avg_line_length": 23.05263157894737,
"alnum_prop": 0.6860730593607306,
"repo_name": "lorensen/VTKExamples",
"id": "ac6fd4d6b8108f7406702f3e128be91ed72be410",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Python/Rendering/ColoredSphere.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "322226"
},
{
"name": "C++",
"bytes": "4187688"
},
{
"name": "CMake",
"bytes": "155244"
},
{
"name": "CSS",
"bytes": "556"
},
{
"name": "G-code",
"bytes": "377583"
},
{
"name": "GLSL",
"bytes": "5375"
},
{
"name": "HTML",
"bytes": "635483160"
},
{
"name": "Java",
"bytes": "629442"
},
{
"name": "JavaScript",
"bytes": "18199"
},
{
"name": "Python",
"bytes": "1376010"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
} |
'''
Implements the targetcli target related UI.
This file is part of targetcli.
Copyright (c) 2011-2013 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
try:
import ethtool
except ImportError:
ethtool = None
import os
import six
import stat
from configshell_fb import ExecutionError
from rtslib_fb import RTSLibBrokenLink, RTSLibError
from rtslib_fb import MappedLUN, NetworkPortal, NodeACL
from rtslib_fb import LUN, Target, TPG, StorageObjectFactory
from .ui_backstore import complete_path
from .ui_node import UINode, UIRTSLibNode
auth_params = ('userid', 'password', 'mutual_userid', 'mutual_password')
discovery_params = auth_params + ("enable",)
class UIFabricModule(UIRTSLibNode):
'''
A fabric module UI.
'''
def __init__(self, fabric_module, parent):
super(UIFabricModule, self).__init__(fabric_module.name,
fabric_module, parent,
late_params=True)
self.refresh()
if self.rtsnode.has_feature('discovery_auth'):
for param in discovery_params:
self.define_config_group_param('discovery_auth',
param, 'string')
self.refresh()
# Support late params
#
# By default the base class will call list_parameters and list_attributes
# in init. This stops us from being able to lazy-load fabric modules.
# We declare we support "late_params" to stop this, and then
# this code overrides the base class methods that involve enumerating
# this stuff, so we don't need to call list_parameters/attrs (which
# would cause the module to load) until the ui is actually asking for
# them from us.
# Currently fabricmodules don't have these anyways, this is all a CYA thing.
def list_config_groups(self):
groups = super(UIFabricModule, self).list_config_groups()
if len(self.rtsnode.list_parameters()):
groups.append('parameter')
if len(self.rtsnode.list_attributes()):
groups.append('attribute')
return groups
# Support late params (see above)
def list_group_params(self, group, writable=None):
if group not in ("parameter", "attribute"):
return super(UIFabricModule, self).list_group_params(group,
writable)
params_func = getattr(self.rtsnode, "list_%ss" % group)
params = params_func()
params_ro = params_func(writable=False)
ret_list = []
for param in params:
p_writable = param not in params_ro
if writable is not None and p_writable != writable:
continue
ret_list.append(param)
ret_list.sort()
return ret_list
# Support late params (see above)
def get_group_param(self, group, param):
if group not in ("parameter", "attribute"):
return super(UIFabricModule, self).get_group_param(group, param)
if param not in self.list_group_params(group):
raise ValueError("Not such parameter %s in configuration group %s"
% (param, group))
description = "The %s %s." % (param, group)
writable = param in self.list_group_params(group, writable=True)
return dict(name=param, group=group, type="string",
description=description, writable=writable)
def ui_getgroup_discovery_auth(self, auth_attr):
'''
This is the backend method for getting discovery_auth attributes.
@param auth_attr: The auth attribute to get the value of.
@type auth_attr: str
@return: The auth attribute's value
@rtype: str
'''
if auth_attr == 'enable':
return self.rtsnode.discovery_enable_auth
else:
return getattr(self.rtsnode, "discovery_" + auth_attr)
def ui_setgroup_discovery_auth(self, auth_attr, value):
'''
This is the backend method for setting discovery auth attributes.
@param auth_attr: The auth attribute to set the value of.
@type auth_attr: str
@param value: The auth's value
@type value: str
'''
self.assert_root()
if value is None:
value = ''
if auth_attr == 'enable':
self.rtsnode.discovery_enable_auth = value
else:
setattr(self.rtsnode, "discovery_" + auth_attr, value)
def refresh(self):
self._children = set([])
for target in self.rtsnode.targets:
self.shell.log.debug("Found target %s under fabric module %s."
% (target.wwn, target.fabric_module))
if target.has_feature('tpgts'):
UIMultiTPGTarget(target, self)
else:
UITarget(target, self)
def summary(self):
status = None
msg = []
fm = self.rtsnode
if fm.has_feature('discovery_auth') and fm.discovery_enable_auth:
if not (fm.discovery_password and fm.discovery_userid):
status = False
else:
status = True
if fm.discovery_authenticate_target:
msg.append("mutual disc auth")
else:
msg.append("1-way disc auth")
msg.append("Targets: %d" % len(self._children))
return (", ".join(msg), status)
def ui_command_create(self, wwn=None):
'''
Creates a new target. The I{wwn} format depends on the transport(s)
        supported by the fabric module. If the I{wwn} is omitted, then a
target will be created using either a randomly generated WWN of the
proper type, or the first unused WWN in the list of possible WWNs if
one is available. If WWNs are constrained to a list (i.e. for hardware
        target addresses) and all WWNs are in use, the target creation will
        fail. Use the B{info} command to get more information about WWN type
and possible values.
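        Example (illustrative iSCSI-style WWN):
            create wwn=iqn.2003-01.org.linux-iscsi.test:target1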
SEE ALSO
========
B{info}
'''
self.assert_root()
target = Target(self.rtsnode, wwn, mode='create')
wwn = target.wwn
        if self.rtsnode.wwns is not None and wwn not in self.rtsnode.wwns:
self.shell.log.warning("Hardware missing for this WWN")
if target.has_feature('tpgts'):
ui_target = UIMultiTPGTarget(target, self)
self.shell.log.info("Created target %s." % wwn)
return ui_target.ui_command_create()
else:
ui_target = UITarget(target, self)
self.shell.log.info("Created target %s." % wwn)
return self.new_node(ui_target)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn' and self.rtsnode.wwns is not None:
existing_wwns = [child.wwn for child in self.rtsnode.targets]
completions = [wwn for wwn in self.rtsnode.wwns
if wwn.startswith(text)
if wwn not in existing_wwns]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, wwn):
'''
Recursively deletes the target with the specified I{wwn}, and all
objects hanging under it.
SEE ALSO
========
B{create}
'''
self.assert_root()
target = Target(self.rtsnode, wwn, mode='lookup')
target.delete()
self.shell.log.info("Deleted Target %s." % wwn)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn':
wwns = [child.name for child in self.children]
completions = [wwn for wwn in wwns if wwn.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_info(self):
'''
Displays information about the fabric module, notably the supported
        transport(s) and accepted B{wwn} format(s), as well as supported
features.
'''
fabric = self.rtsnode
self.shell.log.info("Fabric module name: %s" % self.name)
self.shell.log.info("ConfigFS path: %s" % self.rtsnode.path)
self.shell.log.info("Allowed WWN types: %s" % ", ".join(fabric.wwn_types))
if fabric.wwns is not None:
self.shell.log.info("Allowed WWNs list: %s" % ', '.join(fabric.wwns))
self.shell.log.info("Fabric module features: %s" % ', '.join(fabric.features))
self.shell.log.info("Corresponding kernel module: %s"
% fabric.kernel_module)
def ui_command_version(self):
'''
Displays the target fabric module version.
'''
version = "Target fabric module %s: %s" \
% (self.rtsnode.name, self.rtsnode.version)
self.shell.con.display(version.strip())
class UIMultiTPGTarget(UIRTSLibNode):
'''
A generic target UI that has multiple TPGs.
'''
def __init__(self, target, parent):
super(UIMultiTPGTarget, self).__init__(target.wwn, target, parent)
self.refresh()
def refresh(self):
self._children = set([])
for tpg in self.rtsnode.tpgs:
UITPG(tpg, self)
def summary(self):
try:
self.rtsnode.fabric_module.to_normalized_wwn(self.rtsnode.wwn)
except:
return ("INVALID WWN", False)
return ("TPGs: %d" % len(self._children), None)
def ui_command_create(self, tag=None):
'''
Creates a new Target Portal Group within the target. The
I{tag} must be a positive integer value, optionally prefaced
by 'tpg'. If omitted, the next available Target Portal Group
Tag (TPGT) will be used.
SEE ALSO
========
B{delete}
'''
self.assert_root()
if tag:
if tag.startswith("tpg"):
tag = tag[3:]
try:
tag = int(tag)
except ValueError:
raise ExecutionError("Tag argument must be a number.")
tpg = TPG(self.rtsnode, tag, mode='create')
if self.shell.prefs['auto_enable_tpgt']:
tpg.enable = True
if tpg.has_feature("auth"):
tpg.set_attribute("authentication", 0)
self.shell.log.info("Created TPG %s." % tpg.tag)
if tpg.has_feature("nps") and self.shell.prefs['auto_add_default_portal']:
try:
NetworkPortal(tpg, "0.0.0.0")
self.shell.log.info("Global pref auto_add_default_portal=true")
self.shell.log.info("Created default portal listening on all IPs"
" (0.0.0.0), port 3260.")
except RTSLibError:
self.shell.log.info("Default portal not created, TPGs within a " +
"target cannot share ip:port.")
ui_tpg = UITPG(tpg, self)
return self.new_node(ui_tpg)
def ui_command_delete(self, tag):
'''
Deletes the Target Portal Group with TPGT I{tag} from the target. The
I{tag} must be a positive integer matching an existing TPGT.
SEE ALSO
========
B{create}
'''
self.assert_root()
if tag.startswith("tpg"):
tag = tag[3:]
try:
tag = int(tag)
except ValueError:
raise ExecutionError("Tag argument must be a number.")
tpg = TPG(self.rtsnode, tag, mode='lookup')
tpg.delete()
self.shell.log.info("Deleted TPGT %s." % tag)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'tag':
tags = [child.name[4:] for child in self.children]
completions = [tag for tag in tags if tag.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UITPG(UIRTSLibNode):
ui_desc_attributes = {
'authentication': ('number', 'If set to 1, enforce authentication for this TPG.'),
'cache_dynamic_acls': ('number', 'If set to 1 in demo mode, cache dynamically generated ACLs.'),
'default_cmdsn_depth': ('number', 'Default CmdSN (Command Sequence Number) depth.'),
'default_erl': ('number', 'Default Error Recovery Level.'),
'demo_mode_discovery': ('number', 'If set to 1 in demo mode, enable discovery.'),
'demo_mode_write_protect': ('number', 'If set to 1 in demo mode, prevent writes to LUNs.'),
'fabric_prot_type': ('number', 'Fabric DIF protection type.'),
'generate_node_acls': ('number', 'If set to 1, allow all initiators to login (i.e. demo mode).'),
'login_timeout': ('number', 'Login timeout value in seconds.'),
'netif_timeout': ('number', 'NIC failure timeout in seconds.'),
'prod_mode_write_protect': ('number', 'If set to 1, prevent writes to LUNs.'),
't10_pi': ('number', 'If set to 1, enable T10 Protection Information.'),
'tpg_enabled_sendtargets': ('number', 'If set to 1, the SendTargets discovery response advertises the TPG only if the TPG is enabled.'),
}
ui_desc_parameters = {
'AuthMethod': ('string', 'Authentication method used by the TPG.'),
'DataDigest': ('string', 'If set to CRC32C, the integrity of the PDU data part is verified.'),
'DataPDUInOrder': ('yesno', 'If set to Yes, the data PDUs within sequences must be in order.'),
'DataSequenceInOrder': ('yesno', 'If set to Yes, the data sequences must be in order.'),
'DefaultTime2Retain': ('number', 'Maximum time, in seconds, after an initial wait, before which an active task reassignment is still possible after an unexpected connection termination or a connection reset.'),
'DefaultTime2Wait': ('number', 'Minimum time, in seconds, to wait before attempting an explicit/implicit logout or an active task reassignment after an unexpected connection termination or a connection reset.'),
'ErrorRecoveryLevel': ('number', 'Recovery levels represent a combination of recovery capabilities.'),
'FirstBurstLength': ('number', 'Maximum amount in bytes of unsolicited data an initiator may send.'),
'HeaderDigest': ('string', 'If set to CRC32C, the integrity of the PDU header part is verified.'),
'IFMarker': ('yesno', 'Deprecated according to RFC 7143.'),
'IFMarkInt': ('string', 'Deprecated according to RFC 7143.'),
'ImmediateData': ('string', 'Immediate data support.'),
'InitialR2T': ('yesno', 'If set to No, the default use of R2T (Ready To Transfer) is disabled.'),
'MaxBurstLength': ('number', 'Maximum SCSI data payload in bytes in a Data-In or a solicited Data-Out iSCSI sequence.'),
'MaxConnections': ('number', 'Maximum number of connections acceptable.'),
'MaxOutstandingR2T': ('number', 'Maximum number of outstanding R2Ts per task.'),
'MaxRecvDataSegmentLength': ('number', 'Maximum data segment length in bytes the target can receive in an iSCSI PDU.'),
'MaxXmitDataSegmentLength': ('number', 'Outgoing MaxRecvDataSegmentLength sent over the wire during iSCSI login response.'),
'OFMarker': ('yesno', 'Deprecated according to RFC 7143.'),
'OFMarkInt': ('string', 'Deprecated according to RFC 7143.'),
'TargetAlias': ('string', 'Human-readable target name or description.'),
}
'''
A generic TPG UI.
'''
def __init__(self, tpg, parent):
name = "tpg%d" % tpg.tag
super(UITPG, self).__init__(name, tpg, parent)
self.refresh()
UILUNs(tpg, self)
if tpg.has_feature('acls'):
UINodeACLs(self.rtsnode, self)
if tpg.has_feature('nps'):
UIPortals(self.rtsnode, self)
if self.rtsnode.has_feature('auth') \
and os.path.exists(self.rtsnode.path + "/auth"):
for param in auth_params:
self.define_config_group_param('auth', param, 'string')
def summary(self):
tpg = self.rtsnode
status = None
msg = []
if tpg.has_feature('nexus'):
msg.append(str(self.rtsnode.nexus))
if not tpg.enable:
return ("disabled", False)
if tpg.has_feature("acls"):
if "generate_node_acls" in tpg.list_attributes() and \
int(tpg.get_attribute("generate_node_acls")):
msg.append("gen-acls")
else:
msg.append("no-gen-acls")
# 'auth' feature requires 'acls'
if tpg.has_feature("auth"):
if not int(tpg.get_attribute("authentication")):
msg.append("no-auth")
if int(tpg.get_attribute("generate_node_acls")):
# if auth=0, g_n_a=1 is recommended
status = True
else:
if not int(tpg.get_attribute("generate_node_acls")):
msg.append("auth per-acl")
else:
msg.append("tpg-auth")
status = True
if not (tpg.chap_password and tpg.chap_userid):
status = False
if tpg.authenticate_target:
msg.append("mutual auth")
else:
msg.append("1-way auth")
return (", ".join(msg), status)
def ui_getgroup_auth(self, auth_attr):
return getattr(self.rtsnode, "chap_" + auth_attr)
def ui_setgroup_auth(self, auth_attr, value):
self.assert_root()
if value is None:
value = ''
setattr(self.rtsnode, "chap_" + auth_attr, value)
def ui_command_enable(self):
'''
Enables the TPG.
SEE ALSO
========
B{disable status}
'''
self.assert_root()
if self.rtsnode.enable:
self.shell.log.info("The TPGT is already enabled.")
else:
try:
self.rtsnode.enable = True
self.shell.log.info("The TPGT has been enabled.")
except RTSLibError:
raise ExecutionError("The TPGT could not be enabled.")
def ui_command_disable(self):
'''
Disables the TPG.
SEE ALSO
========
B{enable status}
'''
self.assert_root()
if self.rtsnode.enable:
self.rtsnode.enable = False
self.shell.log.info("The TPGT has been disabled.")
else:
self.shell.log.info("The TPGT is already disabled.")
class UITarget(UITPG):
'''
A generic target UI merged with its only TPG.
'''
def __init__(self, target, parent):
super(UITarget, self).__init__(TPG(target, 1), parent)
self._name = target.wwn
self.target = target
if self.parent.name != "sbp":
self.rtsnode.enable = True
def summary(self):
try:
self.target.fabric_module.to_normalized_wwn(self.target.wwn)
except:
return ("INVALID WWN", False)
return super(UITarget, self).summary()
class UINodeACLs(UINode):
'''
A generic UI for node ACLs.
'''
def __init__(self, tpg, parent):
super(UINodeACLs, self).__init__("acls", parent)
self.tpg = tpg
self.refresh()
def refresh(self):
self._children = set([])
for name in self.all_names():
UINodeACL(name, self)
def summary(self):
return ("ACLs: %d" % len(self._children), None)
def ui_command_create(self, wwn, add_mapped_luns=None):
'''
Creates a Node ACL for the initiator node with the specified I{wwn}.
The node's I{wwn} must match the expected WWN Type of the target's
fabric module.
If I{add_mapped_luns} is omitted, the global parameter
B{auto_add_mapped_luns} will be used, else B{true} or B{false} are
accepted. If B{true}, then after creating the ACL, mapped LUNs will be
automatically created for all existing LUNs.
SEE ALSO
========
B{delete}
'''
self.assert_root()
add_mapped_luns = self.ui_eval_param(add_mapped_luns, 'bool',
self.shell.prefs['auto_add_mapped_luns'])
node_acl = NodeACL(self.tpg, wwn, mode="create")
ui_node_acl = UINodeACL(node_acl.node_wwn, self)
self.shell.log.info("Created Node ACL for %s" % node_acl.node_wwn)
if add_mapped_luns:
for lun in self.tpg.luns:
MappedLUN(node_acl, lun.lun, lun.lun, write_protect=False)
self.shell.log.info("Created mapped LUN %d." % lun.lun)
self.refresh()
return self.new_node(ui_node_acl)
def ui_command_delete(self, wwn):
'''
Deletes the Node ACL with the specified I{wwn}.
SEE ALSO
========
B{create}
'''
self.assert_root()
node_acl = NodeACL(self.tpg, wwn, mode='lookup')
node_acl.delete()
self.shell.log.info("Deleted Node ACL %s." % wwn)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn':
wwns = [acl.node_wwn for acl in self.tpg.node_acls]
completions = [wwn for wwn in wwns if wwn.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def find_tagged(self, name):
for na in self.tpg.node_acls:
if na.node_wwn == name:
yield na
elif na.tag == name:
yield na
def all_names(self):
names = set([])
for na in self.tpg.node_acls:
if na.tag:
names.add(na.tag)
else:
names.add(na.node_wwn)
return names
def ui_command_tag(self, wwn_or_tag, new_tag):
'''
Tag a NodeACL.
Usage: tag <wwn_or_tag> <new_tag>
Tags help manage initiator WWNs. A tag can apply to one or
more WWNs. This can give a more meaningful name to a single
initiator's configuration, or allow multiple initiators with
identical settings to be configured en masse.
The WWNs described by <wwn_or_tag> will be given the new
tag. If new_tag already exists, its new members will adopt the
current tag's configuration.
Within a tag, the 'info' command shows the WWNs the tag applies to.
Use 'untag' to remove tags.
NOTE: tags are only supported in kernel 3.8 and above.
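        Example (illustrative WWNs and tag name):
            tag iqn.1994-05.com.example:client1 webfarm
            tag iqn.1994-05.com.example:client2 webfarm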
'''
if wwn_or_tag == new_tag:
return
# Since all WWNs have a '.' in them, let's avoid confusion.
if '.' in new_tag:
raise ExecutionError("'.' not permitted in tag names.")
src = list(self.find_tagged(wwn_or_tag))
if not src:
raise ExecutionError("wwn_or_tag %s not found." % wwn_or_tag)
old_tag_members = list(self.find_tagged(new_tag))
# handle overlap
src_wwns = [na.node_wwn for na in src]
old_tag_members = [old for old in old_tag_members if old.node_wwn not in src_wwns]
for na in src:
na.tag = new_tag
# if joining a tag, take its config
if old_tag_members:
model = old_tag_members[0]
for mlun in na.mapped_luns:
mlun.delete()
for mlun in model.mapped_luns:
MappedLUN(na, mlun.mapped_lun, mlun.tpg_lun, mlun.write_protect)
if self.parent.rtsnode.has_feature("auth"):
for param in auth_params:
setattr(na, "chap_" + param, getattr(model, "chap_" + param))
for item in model.list_attributes(writable=True):
na.set_attribute(item, model.get_attribute(item))
for item in model.list_parameters(writable=True):
na.set_parameter(item, model.get_parameter(item))
self.refresh()
def ui_command_untag(self, wwn_or_tag):
'''
Untag a NodeACL.
Usage: untag <tag>
Remove the tag given to one or more initiator WWNs. They will
return to being displayed by WWN in the configuration tree, and
will maintain settings from when they were tagged.
'''
for na in list(self.find_tagged(wwn_or_tag)):
na.tag = None
self.refresh()
def ui_complete_tag(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command tag
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn_or_tag':
completions = [n for n in self.all_names() if n.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
ui_complete_untag = ui_complete_tag
class UINodeACL(UIRTSLibNode):
'''
A generic UI for a node ACL.
Handles grouping multiple NodeACLs in UI via tags.
All gets are performed against first NodeACL.
All sets are performed on all NodeACLs.
This is to make management of multiple ACLs easier.
'''
ui_desc_attributes = {
'dataout_timeout': ('number', 'Data-Out timeout in seconds before invoking recovery.'),
'dataout_timeout_retries': ('number', 'Number of Data-Out timeout recovery attempts before failing a path.'),
'default_erl': ('number', 'Default Error Recovery Level.'),
'nopin_response_timeout': ('number', 'Nop-In response timeout in seconds.'),
'nopin_timeout': ('number', 'Nop-In timeout in seconds.'),
'random_datain_pdu_offsets': ('number', 'If set to 1, request random Data-In PDU offsets.'),
'random_datain_seq_offsets': ('number', 'If set to 1, request random Data-In sequence offsets.'),
'random_r2t_offsets': ('number', 'If set to 1, request random R2T (Ready To Transfer) offsets.'),
}
ui_desc_parameters = UITPG.ui_desc_parameters
def __init__(self, name, parent):
# Don't want to duplicate work in UIRTSLibNode, so call it but
# del self.rtsnode to make sure we always use self.rtsnodes.
self.rtsnodes = list(parent.find_tagged(name))
super(UINodeACL, self).__init__(name, self.rtsnodes[0], parent)
del self.rtsnode
if self.parent.parent.rtsnode.has_feature('auth'):
for parameter in auth_params:
self.define_config_group_param('auth', parameter, 'string')
self.refresh()
def ui_getgroup_auth(self, auth_attr):
'''
This is the backend method for getting auths attributes.
@param auth_attr: The auth attribute to get the value of.
@type auth_attr: str
@return: The auth attribute's value
@rtype: str
'''
# All should return same, so just return from the first one
return getattr(self.rtsnodes[0], "chap_" + auth_attr)
def ui_setgroup_auth(self, auth_attr, value):
'''
This is the backend method for setting auths attributes.
@param auth_attr: The auth attribute to set the value of.
@type auth_attr: str
@param value: The auth's value
@type value: str
'''
self.assert_root()
if value is None:
value = ''
for na in self.rtsnodes:
setattr(na, "chap_" + auth_attr, value)
def refresh(self):
self._children = set([])
for mlun in self.rtsnodes[0].mapped_luns:
UIMappedLUN(mlun, self)
def summary(self):
msg = []
if self.name != self.rtsnodes[0].node_wwn:
if len(self.rtsnodes) > 1:
msg.append("(group of %d)" % len(self.rtsnodes))
else:
msg.append("(%s)" % self.rtsnodes[0].node_wwn)
status = None
na = self.rtsnodes[0]
tpg = self.parent.parent.rtsnode
if tpg.has_feature("auth") and \
int(tpg.get_attribute("authentication")):
if int(tpg.get_attribute("generate_node_acls")):
msg.append("auth via tpg")
else:
status = True
if not (na.chap_password and na.chap_userid):
status = False
if na.authenticate_target:
msg.append("mutual auth")
else:
msg.append("1-way auth")
msg.append("Mapped LUNs: %d" % len(self._children))
return (", ".join(msg), status)
def ui_command_create(self, mapped_lun, tpg_lun_or_backstore, write_protect=None):
'''
Creates a mapping to one of the TPG LUNs for the initiator referenced
by the ACL. The provided I{tpg_lun_or_backstore} will appear to that
initiator as LUN I{mapped_lun}. If the I{write_protect} flag is set to
B{1}, the initiator will not have write access to the Mapped LUN.
A storage object may also be given for the I{tpg_lun_or_backstore} parameter,
in which case the TPG LUN will be created for that backstore before
mapping the LUN to the initiator. If a TPG LUN for the backstore already
exists, the Mapped LUN will map to that TPG LUN.
Finally, a path to an existing block device or file can be given. If so,
a storage object of the appropriate type is created with default parameters,
followed by the TPG LUN and the Mapped LUN.
SEE ALSO
========
B{delete}
'''
self.assert_root()
try:
mapped_lun = int(mapped_lun)
except ValueError:
raise ExecutionError("mapped_lun must be an integer")
try:
if tpg_lun_or_backstore.startswith("lun"):
tpg_lun_or_backstore = tpg_lun_or_backstore[3:]
tpg_lun = int(tpg_lun_or_backstore)
except ValueError:
try:
so = self.get_node(tpg_lun_or_backstore).rtsnode
except ValueError:
try:
so = StorageObjectFactory(tpg_lun_or_backstore)
self.shell.log.info("Created storage object %s." % so.name)
except RTSLibError:
raise ExecutionError("LUN, storage object, or path not valid")
self.get_node("/backstores").refresh()
ui_tpg = self.parent.parent
for lun in ui_tpg.rtsnode.luns:
if so == lun.storage_object:
tpg_lun = lun.lun
break
else:
lun_object = LUN(ui_tpg.rtsnode, storage_object=so)
self.shell.log.info("Created LUN %s." % lun_object.lun)
ui_lun = UILUN(lun_object, ui_tpg.get_node("luns"))
tpg_lun = ui_lun.rtsnode.lun
if tpg_lun in (ml.tpg_lun.lun for ml in self.rtsnodes[0].mapped_luns):
self.shell.log.warning(
"Warning: TPG LUN %d already mapped to this NodeACL" % tpg_lun)
for na in self.rtsnodes:
mlun = MappedLUN(na, mapped_lun, tpg_lun, write_protect)
ui_mlun = UIMappedLUN(mlun, self)
self.shell.log.info("Created Mapped LUN %s." % mlun.mapped_lun)
return self.new_node(ui_mlun)
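    # Illustrative shell usage only (not part of the original module); node
    # and backstore names below are hypothetical:
    #   /iscsi/iqn...example/tpg1/acls/iqn...client> create 0 /backstores/block/disk0
    #   /iscsi/iqn...example/tpg1/acls/iqn...client> create mapped_lun=1 tpg_lun_or_backstore=lun0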
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'tpg_lun_or_backstore':
completions = []
for backstore in self.get_node('/backstores').children:
for storage_object in backstore.children:
completions.append(storage_object.path)
completions.extend(lun.name for lun in self.parent.parent.get_node("luns").children)
completions.extend(complete_path(text, lambda x: stat.S_ISREG(x) or stat.S_ISBLK(x)))
completions = [c for c in completions if c.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, mapped_lun):
'''
Deletes the specified I{mapped_lun}.
SEE ALSO
========
B{create}
'''
self.assert_root()
for na in self.rtsnodes:
mlun = MappedLUN(na, mapped_lun)
mlun.delete()
self.shell.log.info("Deleted Mapped LUN %s." % mapped_lun)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'mapped_lun':
mluns = [str(mlun.mapped_lun) for mlun in self.rtsnodes[0].mapped_luns]
completions = [mlun for mlun in mluns if mlun.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
# Override these four methods to handle multiple NodeACLs
def ui_getgroup_attribute(self, attribute):
return self.rtsnodes[0].get_attribute(attribute)
def ui_setgroup_attribute(self, attribute, value):
self.assert_root()
for na in self.rtsnodes:
na.set_attribute(attribute, value)
def ui_getgroup_parameter(self, parameter):
return self.rtsnodes[0].get_parameter(parameter)
def ui_setgroup_parameter(self, parameter, value):
self.assert_root()
for na in self.rtsnodes:
na.set_parameter(parameter, value)
def ui_command_info(self):
'''
Since we don't have a self.rtsnode we can't use the base implementation
of this method. We also want to not print node_wwn, but list *all*
wwns for this entry.
'''
info = self.rtsnodes[0].dump()
for item in ('attributes', 'parameters', "node_wwn"):
if item in info:
del info[item]
for name, value in sorted(six.iteritems(info)):
            if not isinstance(value, (dict, list)):
self.shell.log.info("%s: %s" % (name, value))
self.shell.log.info("wwns:")
for na in self.parent.find_tagged(self.name):
self.shell.log.info(na.node_wwn)
class UIMappedLUN(UIRTSLibNode):
'''
A generic UI for MappedLUN objects.
'''
def __init__(self, mapped_lun, parent):
name = "mapped_lun%d" % mapped_lun.mapped_lun
super(UIMappedLUN, self).__init__(name, mapped_lun, parent)
self.refresh()
def summary(self):
mapped_lun = self.rtsnode
is_healthy = True
try:
tpg_lun = mapped_lun.tpg_lun
except RTSLibBrokenLink:
description = "BROKEN LUN LINK"
is_healthy = False
else:
if mapped_lun.write_protect:
access_mode = 'ro'
else:
access_mode = 'rw'
description = "lun%d %s/%s (%s)" \
% (tpg_lun.lun, tpg_lun.storage_object.plugin,
tpg_lun.storage_object.name, access_mode)
return (description, is_healthy)
class UILUNs(UINode):
'''
A generic UI for TPG LUNs.
'''
def __init__(self, tpg, parent):
super(UILUNs, self).__init__("luns", parent)
self.tpg = tpg
self.refresh()
def refresh(self):
self._children = set([])
for lun in self.tpg.luns:
UILUN(lun, self)
def summary(self):
return ("LUNs: %d" % len(self._children), None)
def ui_command_create(self, storage_object, lun=None,
add_mapped_luns=None):
'''
Creates a new LUN in the Target Portal Group, attached to a storage
object. If the I{lun} parameter is omitted, the first available LUN in
the TPG will be used. If present, it must be a number greater than 0.
Alternatively, the syntax I{lunX} where I{X} is a positive number is
also accepted.
The I{storage_object} may be the path of an existing storage object,
i.e. B{/backstore/pscsi0/mydisk} to reference the B{mydisk} storage
object of the virtual HBA B{pscsi0}. It also may be the path to an
existing block device or image file, in which case a storage object
will be created for it first, with default parameters.
If I{add_mapped_luns} is omitted, the global parameter
B{auto_add_mapped_luns} will be used, else B{true} or B{false} are
accepted. If B{true}, then after creating the LUN, mapped LUNs will be
automatically created for all existing node ACLs, mapping the new LUN.
SEE ALSO
========
B{delete}
'''
self.assert_root()
add_mapped_luns = \
self.ui_eval_param(add_mapped_luns, 'bool',
self.shell.prefs['auto_add_mapped_luns'])
try:
so = self.get_node(storage_object).rtsnode
except ValueError:
try:
so = StorageObjectFactory(storage_object)
self.shell.log.info("Created storage object %s." % so.name)
except RTSLibError:
raise ExecutionError("storage object or path not valid")
self.get_node("/backstores").refresh()
if so in (l.storage_object for l in self.parent.rtsnode.luns):
raise ExecutionError("lun for storage object %s/%s already exists" \
% (so.plugin, so.name))
if lun and lun.lower().startswith('lun'):
lun = lun[3:]
lun_object = LUN(self.tpg, lun, so)
self.shell.log.info("Created LUN %s." % lun_object.lun)
ui_lun = UILUN(lun_object, self)
if add_mapped_luns:
for acl in self.tpg.node_acls:
if lun:
mapped_lun = lun
else:
mapped_lun = 0
existing_mluns = [mlun.mapped_lun for mlun in acl.mapped_luns]
if mapped_lun in existing_mluns:
mapped_lun = None
for possible_mlun in six.moves.range(MappedLUN.MAX_LUN):
if possible_mlun not in existing_mluns:
mapped_lun = possible_mlun
break
                if mapped_lun is None:
self.shell.log.warning(
"Cannot map new lun %s into ACL %s"
% (lun_object.lun, acl.node_wwn))
else:
mlun = MappedLUN(acl, mapped_lun, lun_object, write_protect=False)
self.shell.log.info("Created LUN %d->%d mapping in node ACL %s"
% (mlun.tpg_lun.lun, mlun.mapped_lun, acl.node_wwn))
self.parent.refresh()
return self.new_node(ui_lun)
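    # Illustrative shell usage only (not part of the original module); the
    # storage object path is hypothetical:
    #   /iscsi/iqn...example/tpg1/luns> create /backstores/block/disk0
    #   /iscsi/iqn...example/tpg1/luns> create /backstores/block/disk0 lun=3
    # With auto_add_mapped_luns enabled, the loop above also creates a
    # mapped LUN in every existing node ACL.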
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'storage_object':
storage_objects = []
for backstore in self.get_node('/backstores').children:
for storage_object in backstore.children:
storage_objects.append(storage_object.path)
completions = [so for so in storage_objects if so.startswith(text)]
completions.extend(complete_path(text, lambda x: stat.S_ISREG(x) or stat.S_ISBLK(x)))
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, lun):
'''
Deletes the supplied LUN from the Target Portal Group. The I{lun} must
be a positive number matching an existing LUN.
Alternatively, the syntax I{lunX} where I{X} is a positive number is
also accepted.
SEE ALSO
========
B{create}
'''
self.assert_root()
if lun.lower().startswith("lun"):
lun = lun[3:]
try:
lun_object = LUN(self.tpg, lun)
        except Exception:
raise RTSLibError("Invalid LUN")
lun_object.delete()
self.shell.log.info("Deleted LUN %s." % lun)
        # Refresh the TPG, as we also need to refresh the ACLs' MappedLUNs
self.parent.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'lun':
luns = [str(lun.lun) for lun in self.tpg.luns]
completions = [lun for lun in luns if lun.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UILUN(UIRTSLibNode):
'''
A generic UI for LUN objects.
'''
def __init__(self, lun, parent):
name = "lun%d" % lun.lun
super(UILUN, self).__init__(name, lun, parent)
self.refresh()
self.define_config_group_param("alua", "alua_tg_pt_gp_name", 'string')
def summary(self):
lun = self.rtsnode
is_healthy = True
try:
storage_object = lun.storage_object
except RTSLibBrokenLink:
description = "BROKEN STORAGE LINK"
is_healthy = False
else:
description = "%s/%s" % (storage_object.plugin, storage_object.name,)
if storage_object.udev_path:
description += " (%s)" % storage_object.udev_path
description += " (%s)" % lun.alua_tg_pt_gp_name
return (description, is_healthy)
def ui_getgroup_alua(self, alua_attr):
return getattr(self.rtsnode, alua_attr)
def ui_setgroup_alua(self, alua_attr, value):
self.assert_root()
if value is None:
return
setattr(self.rtsnode, alua_attr, value)
class UIPortals(UINode):
'''
A generic UI for TPG network portals.
'''
def __init__(self, tpg, parent):
super(UIPortals, self).__init__("portals", parent)
self.tpg = tpg
self.refresh()
def refresh(self):
self._children = set([])
for portal in self.tpg.network_portals:
UIPortal(portal, self)
def summary(self):
return ("Portals: %d" % len(self._children), None)
def _canonicalize_ip(self, ip_address):
"""
rtslib expects ipv4 addresses as a dotted-quad string, and IPv6
addresses surrounded by brackets.
"""
# Contains a '.'? Must be ipv4, right?
if "." in ip_address:
return ip_address
return "[" + ip_address + "]"
def ui_command_create(self, ip_address=None, ip_port=None):
'''
Creates a Network Portal with specified I{ip_address} and
I{ip_port}. If I{ip_port} is omitted, the default port for
the target fabric will be used. If I{ip_address} is omitted,
INADDR_ANY (0.0.0.0) will be used.
Choosing IN6ADDR_ANY (::0) will listen on all IPv6 interfaces
as well as IPv4, assuming IPV6_V6ONLY sockopt has not been
set.
Note: Portals on Link-local IPv6 addresses are currently not
supported.
SEE ALSO
========
B{delete}
'''
self.assert_root()
# FIXME: Add a specfile parameter to determine default port
ip_port = self.ui_eval_param(ip_port, 'number', 3260)
ip_address = self.ui_eval_param(ip_address, 'string', "0.0.0.0")
if ip_port == 3260:
self.shell.log.info("Using default IP port %d" % ip_port)
if ip_address == "0.0.0.0":
self.shell.log.info("Binding to INADDR_ANY (0.0.0.0)")
portal = NetworkPortal(self.tpg, self._canonicalize_ip(ip_address),
ip_port, mode='create')
self.shell.log.info("Created network portal %s:%d."
% (ip_address, ip_port))
ui_portal = UIPortal(portal, self)
return self.new_node(ui_portal)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
def list_eth_ips():
if not ethtool:
return []
devcfgs = ethtool.get_interfaces_info(ethtool.get_devices())
addrs = set()
for d in devcfgs:
if d.ipv4_address:
addrs.add(d.ipv4_address)
addrs.add("0.0.0.0")
for ip6 in d.get_ipv6_addresses():
addrs.add(ip6.address)
addrs.add("::0") # only list ::0 if ipv6 present
return sorted(addrs)
if current_param == 'ip_address':
completions = [addr for addr in list_eth_ips()
if addr.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, ip_address, ip_port):
'''
Deletes the Network Portal with specified I{ip_address} and I{ip_port}.
SEE ALSO
========
B{create}
'''
self.assert_root()
portal = NetworkPortal(self.tpg, self._canonicalize_ip(ip_address),
ip_port, mode='lookup')
portal.delete()
self.shell.log.info("Deleted network portal %s:%s"
% (ip_address, ip_port))
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
completions = []
        # TODO: Check if a dict comprehension is acceptable here with the
        # supported python versions.
portals = {}
all_ports = set([])
for portal in self.tpg.network_portals:
all_ports.add(str(portal.port))
portal_ip = portal.ip_address.strip('[]')
            if portal_ip not in portals:
portals[portal_ip] = []
portals[portal_ip].append(str(portal.port))
if current_param == 'ip_address':
completions = [addr for addr in portals if addr.startswith(text)]
if 'ip_port' in parameters:
port = parameters['ip_port']
completions = [addr for addr in completions
if port in portals[addr]]
elif current_param == 'ip_port':
if 'ip_address' in parameters:
addr = parameters['ip_address']
if addr in portals:
completions = [port for port in portals[addr]
if port.startswith(text)]
else:
completions = [port for port in all_ports
if port.startswith(text)]
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UIPortal(UIRTSLibNode):
'''
A generic UI for a network portal.
'''
def __init__(self, portal, parent):
name = "%s:%s" % (portal.ip_address, portal.port)
super(UIPortal, self).__init__(name, portal, parent)
self.refresh()
def summary(self):
if self.rtsnode.iser:
return('iser', True)
elif self.rtsnode.offload:
return('offload', True)
return ('', True)
def ui_command_enable_iser(self, boolean):
'''
Enables or disables iSER for this NetworkPortal.
If iSER is not supported by the kernel, this command will do nothing.
'''
boolean = self.ui_eval_param(boolean, 'bool', False)
self.rtsnode.iser = boolean
self.shell.log.info("iSER enable now: %s" % self.rtsnode.iser)
def ui_command_enable_offload(self, boolean):
'''
Enables or disables offload for this NetworkPortal.
If offload is not supported by the kernel, this command will do nothing.
'''
boolean = self.ui_eval_param(boolean, 'bool', False)
self.rtsnode.offload = boolean
self.shell.log.info("offload enable now: %s" % self.rtsnode.offload)
| {
"content_hash": "2447241bece3806f429bd1913d2da063",
"timestamp": "",
"source": "github",
"line_count": 1478,
"max_line_length": 219,
"avg_line_length": 36.54194857916103,
"alnum_prop": 0.5705160251069267,
"repo_name": "cvubrugier/targetcli-fb",
"id": "6895b38e62b855fd72d653a43c9db455f3ba5ed1",
"size": "54009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "targetcli/ui_target.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1612"
},
{
"name": "Python",
"bytes": "109640"
},
{
"name": "Roff",
"bytes": "16093"
}
],
"symlink_target": ""
} |
"""Nginx Configuration"""
from distutils.version import LooseVersion
import logging
import re
import socket
import subprocess
import tempfile
import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
import OpenSSL
import pkg_resources
from acme import challenges
from acme import crypto_util as acme_crypto_util
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.display import util as display_util
from certbot.compat import os
from certbot.plugins import common
from certbot_nginx._internal import constants
from certbot_nginx._internal import display_ops
from certbot_nginx._internal import http_01
from certbot_nginx._internal import nginxparser
from certbot_nginx._internal import obj # pylint: disable=unused-import
from certbot_nginx._internal import parser
NAME_RANK = 0
START_WILDCARD_RANK = 1
END_WILDCARD_RANK = 2
REGEX_RANK = 3
NO_SSL_MODIFIER = 4
logger = logging.getLogger(__name__)
class NginxConfigurator(common.Installer, interfaces.Authenticator):
"""Nginx configurator.
.. todo:: Add proper support for comments in the config. Currently,
config files modified by the configurator will lose all their comments.
:ivar config: Configuration.
:type config: certbot.configuration.NamespaceConfig
:ivar parser: Handles low level parsing
:type parser: :class:`~certbot_nginx._internal.parser`
:ivar str save_notes: Human-readable config change notes
:ivar reverter: saves and reverts checkpoints
:type reverter: :class:`certbot.reverter.Reverter`
:ivar tup version: version of Nginx
"""
description = "Nginx Web Server plugin"
DEFAULT_LISTEN_PORT = '80'
# SSL directives that Certbot can add when installing a new certificate.
SSL_DIRECTIVES = ['ssl_certificate', 'ssl_certificate_key', 'ssl_dhparam']
@classmethod
def add_parser_arguments(cls, add):
default_server_root = _determine_default_server_root()
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Nginx server root directory. (default: %s)" % default_server_root)
add("ctl", default=constants.CLI_DEFAULTS["ctl"], help="Path to the "
"'nginx' binary, used for 'configtest' and retrieving nginx "
"version number.")
add("sleep-seconds", default=constants.CLI_DEFAULTS["sleep_seconds"], type=int,
help="Number of seconds to wait for nginx configuration changes "
"to apply when reloading.")
@property
def nginx_conf(self):
"""Nginx config file path."""
return os.path.join(self.conf("server_root"), "nginx.conf")
def __init__(self, *args, **kwargs):
"""Initialize an Nginx Configurator.
:param tup version: version of Nginx as a tuple (1, 4, 7)
(used mostly for unittesting)
:param tup openssl_version: version of OpenSSL linked to Nginx as a tuple (1, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
openssl_version = kwargs.pop("openssl_version", None)
super().__init__(*args, **kwargs)
# Files to save
self.save_notes = ""
# For creating new vhosts if no names match
self.new_vhost: Optional[obj.VirtualHost] = None
# List of vhosts configured per wildcard domain on this run.
# used by deploy_cert() and enhance()
self._wildcard_vhosts: Dict[str, List[obj.VirtualHost]] = {}
self._wildcard_redirect_vhosts: Dict[str, List[obj.VirtualHost]] = {}
# Add number of outstanding challenges
self._chall_out = 0
# These will be set in the prepare function
self.version = version
self.openssl_version = openssl_version
self._enhance_func = {"redirect": self._enable_redirect,
"ensure-http-header": self._set_http_header,
"staple-ocsp": self._enable_ocsp_stapling}
self.reverter.recovery_routine()
self.parser: parser.NginxParser
@property
def mod_ssl_conf_src(self):
"""Full absolute path to SSL configuration file source."""
# Why all this complexity? Well, we want to support Mozilla's intermediate
# recommendations. But TLS1.3 is only supported by newer versions of Nginx.
# And as for session tickets, our ideal is to turn them off across the board.
# But! Turning them off at all is only supported with new enough versions of
# Nginx. And older versions of OpenSSL have a bug that leads to browser errors
# given certain configurations. While we'd prefer to have forward secrecy, we'd
# rather fail open than error out. Unfortunately, Nginx can be compiled against
# many versions of OpenSSL. So we have to check both for the two different features,
# leading to four different combinations of options.
# For a complete history, check out https://github.com/certbot/certbot/issues/7322
use_tls13 = self.version >= (1, 13, 0)
session_tix_off = self.version >= (1, 5, 9) and self.openssl_version and\
LooseVersion(self.openssl_version) >= LooseVersion('1.0.2l')
if use_tls13:
if session_tix_off:
config_filename = "options-ssl-nginx.conf"
else:
config_filename = "options-ssl-nginx-tls13-session-tix-on.conf"
else:
if session_tix_off:
config_filename = "options-ssl-nginx-tls12-only.conf"
else:
config_filename = "options-ssl-nginx-old.conf"
return pkg_resources.resource_filename(
"certbot_nginx", os.path.join("_internal", "tls_configs", config_filename))
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
@property
def updated_mod_ssl_conf_digest(self):
"""Full absolute path to digest of updated SSL configuration file."""
return os.path.join(self.config.config_dir, constants.UPDATED_MOD_SSL_CONF_DIGEST)
def install_ssl_options_conf(self, options_ssl, options_ssl_digest):
"""Copy Certbot's SSL options file into the system's config dir if required."""
return common.install_version_controlled_file(options_ssl, options_ssl_digest,
self.mod_ssl_conf_src, constants.ALL_SSL_OPTIONS_HASHES)
# This is called in determine_authenticator and determine_installer
def prepare(self):
"""Prepare the authenticator/installer.
:raises .errors.NoInstallationError: If Nginx ctl cannot be found
:raises .errors.MisconfigurationError: If Nginx is misconfigured
"""
# Verify Nginx is installed
if not util.exe_exists(self.conf('ctl')):
raise errors.NoInstallationError(
"Could not find a usable 'nginx' binary. Ensure nginx exists, "
"the binary is executable, and your PATH is set correctly.")
# Make sure configuration is valid
self.config_test()
self.parser = parser.NginxParser(self.conf('server-root'))
# Set Version
if self.version is None:
self.version = self.get_version()
if self.openssl_version is None:
self.openssl_version = self._get_openssl_version()
self.install_ssl_options_conf(self.mod_ssl_conf, self.updated_mod_ssl_conf_digest)
self.install_ssl_dhparams()
# Prevent two Nginx plugins from modifying a config at once
try:
util.lock_dir_until_exit(self.conf('server-root'))
except (OSError, errors.LockError):
logger.debug('Encountered error:', exc_info=True)
raise errors.PluginError('Unable to lock {0}'.format(self.conf('server-root')))
# Entry point in main.py for installing cert
def deploy_cert(self, domain, cert_path, key_path,
chain_path=None, fullchain_path=None):
"""Deploys certificate to specified virtual host.
.. note:: Aborts if the vhost is missing ssl_certificate or
ssl_certificate_key.
.. note:: This doesn't save the config files!
:raises errors.PluginError: When unable to deploy certificate due to
a lack of directives or configuration
"""
if not fullchain_path:
raise errors.PluginError(
"The nginx plugin currently requires --fullchain-path to "
"install a certificate.")
vhosts = self.choose_vhosts(domain, create_if_no_match=True)
for vhost in vhosts:
self._deploy_cert(vhost, cert_path, key_path, chain_path, fullchain_path)
display_util.notify("Successfully deployed certificate for {} to {}"
.format(domain, vhost.filep))
def _deploy_cert(self, vhost, cert_path, key_path, chain_path, fullchain_path): # pylint: disable=unused-argument
"""
        Helper function for deploy_cert() that handles the actual deployment.
        This exists because we might want to do multiple deployments per
        domain originally passed to deploy_cert(), which is especially true
        with wildcard certificates.
"""
cert_directives = [['\n ', 'ssl_certificate', ' ', fullchain_path],
['\n ', 'ssl_certificate_key', ' ', key_path]]
self.parser.update_or_add_server_directives(vhost,
cert_directives)
logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
self.save_notes += ("Changed vhost at %s with addresses of %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs)))
self.save_notes += "\tssl_certificate %s\n" % fullchain_path
self.save_notes += "\tssl_certificate_key %s\n" % key_path
def _choose_vhosts_wildcard(self, domain, prefer_ssl, no_ssl_filter_port=None):
"""Prompts user to choose vhosts to install a wildcard certificate for"""
if prefer_ssl:
vhosts_cache = self._wildcard_vhosts
preference_test = lambda x: x.ssl
else:
vhosts_cache = self._wildcard_redirect_vhosts
preference_test = lambda x: not x.ssl
# Caching!
if domain in vhosts_cache:
# Vhosts for a wildcard domain were already selected
return vhosts_cache[domain]
# Get all vhosts whether or not they are covered by the wildcard domain
vhosts = self.parser.get_vhosts()
# Go through the vhosts, making sure that we cover all the names
# present, but preferring the SSL or non-SSL vhosts
filtered_vhosts = {}
for vhost in vhosts:
# Ensure we're listening non-sslishly on no_ssl_filter_port
if no_ssl_filter_port is not None:
if not self._vhost_listening_on_port_no_ssl(vhost, no_ssl_filter_port):
continue
for name in vhost.names:
if preference_test(vhost):
# Prefer either SSL or non-SSL vhosts
filtered_vhosts[name] = vhost
elif name not in filtered_vhosts:
# Add if not in list previously
filtered_vhosts[name] = vhost
# Only unique VHost objects
dialog_input = set(filtered_vhosts.values())
        # Ask the user which of the names to enable; a list of vhosts comes back
return_vhosts = display_ops.select_vhost_multiple(list(dialog_input))
for vhost in return_vhosts:
if domain not in vhosts_cache:
vhosts_cache[domain] = []
vhosts_cache[domain].append(vhost)
return return_vhosts
#######################
# Vhost parsing methods
#######################
def _choose_vhost_single(self, target_name):
matches = self._get_ranked_matches(target_name)
vhosts = [x for x in [self._select_best_name_match(matches)] if x is not None]
return vhosts
def choose_vhosts(self, target_name, create_if_no_match=False):
"""Chooses a virtual host based on the given domain name.
.. note:: This makes the vhost SSL-enabled if it isn't already. Follows
Nginx's server block selection rules preferring blocks that are
already SSL.
.. todo:: This should maybe return list if no obvious answer
is presented.
:param str target_name: domain name
:param bool create_if_no_match: If we should create a new vhost from default
when there is no match found. If we can't choose a default, raise a
MisconfigurationError.
:returns: ssl vhosts associated with name
:rtype: list of :class:`~certbot_nginx._internal.obj.VirtualHost`
"""
if util.is_wildcard_domain(target_name):
# Ask user which VHosts to support.
vhosts = self._choose_vhosts_wildcard(target_name, prefer_ssl=True)
else:
vhosts = self._choose_vhost_single(target_name)
if not vhosts:
if create_if_no_match:
# result will not be [None] because it errors on failure
vhosts = [self._vhost_from_duplicated_default(target_name, True,
str(self.config.https_port))]
else:
# No matches. Raise a misconfiguration error.
raise errors.MisconfigurationError(
("Cannot find a VirtualHost matching domain %s. "
"In order for Certbot to correctly perform the challenge "
"please add a corresponding server_name directive to your "
"nginx configuration for every domain on your certificate: "
"https://nginx.org/en/docs/http/server_names.html") % (target_name))
# Note: if we are enhancing with ocsp, vhost should already be ssl.
for vhost in vhosts:
if not vhost.ssl:
self._make_server_ssl(vhost)
return vhosts
def ipv6_info(self, port):
"""Returns tuple of booleans (ipv6_active, ipv6only_present)
        ipv6_active is true if any server block is listening on an ipv6 address on any port
ipv6only_present is true if ipv6only=on option exists in any server
block ipv6 listen directive for the specified port.
:param str port: Port to check ipv6only=on directive for
:returns: Tuple containing information if IPv6 is enabled in the global
configuration, and existence of ipv6only directive for specified port
:rtype: tuple of type (bool, bool)
"""
# port should be a string, but it's easy to mess up, so let's
# make sure it is one
port = str(port)
vhosts = self.parser.get_vhosts()
ipv6_active = False
ipv6only_present = False
for vh in vhosts:
for addr in vh.addrs:
if addr.ipv6:
ipv6_active = True
if addr.ipv6only and addr.get_port() == port:
ipv6only_present = True
return (ipv6_active, ipv6only_present)
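    # Illustrative example: with a server block containing
    #   listen [::]:443 ssl ipv6only=on;
    # ipv6_info("443") returns (True, True), while a purely IPv4
    # configuration returns (False, False).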
def _vhost_from_duplicated_default(self, domain: str, allow_port_mismatch: bool, port: str
) -> obj.VirtualHost:
"""if allow_port_mismatch is False, only server blocks with matching ports will be
used as a default server block template.
"""
assert self.parser is not None # prepare should already have been called here
if self.new_vhost is None:
default_vhost = self._get_default_vhost(domain, allow_port_mismatch, port)
self.new_vhost = self.parser.duplicate_vhost(default_vhost,
remove_singleton_listen_params=True)
self.new_vhost.names = set()
self._add_server_name_to_vhost(self.new_vhost, domain)
return self.new_vhost
def _add_server_name_to_vhost(self, vhost, domain):
vhost.names.add(domain)
name_block = [['\n ', 'server_name']]
for name in vhost.names:
name_block[0].append(' ')
name_block[0].append(name)
self.parser.update_or_add_server_directives(vhost, name_block)
def _get_default_vhost(self, domain, allow_port_mismatch, port):
"""Helper method for _vhost_from_duplicated_default; see argument documentation there"""
vhost_list = self.parser.get_vhosts()
# if one has default_server set, return that one
all_default_vhosts = []
port_matching_vhosts = []
for vhost in vhost_list:
for addr in vhost.addrs:
if addr.default:
all_default_vhosts.append(vhost)
if self._port_matches(port, addr.get_port()):
port_matching_vhosts.append(vhost)
break
if len(port_matching_vhosts) == 1:
return port_matching_vhosts[0]
elif len(all_default_vhosts) == 1 and allow_port_mismatch:
return all_default_vhosts[0]
# TODO: present a list of vhosts for user to choose from
raise errors.MisconfigurationError("Could not automatically find a matching server"
" block for %s. Set the `server_name` directive to use the Nginx installer." % domain)
def _get_ranked_matches(self, target_name):
"""Returns a ranked list of vhosts that match target_name.
The ranking gives preference to SSL vhosts.
:param str target_name: The name to match
:returns: list of dicts containing the vhost, the matching name, and
the numerical rank
:rtype: list
"""
vhost_list = self.parser.get_vhosts()
return self._rank_matches_by_name_and_ssl(vhost_list, target_name)
def _select_best_name_match(self, matches):
"""Returns the best name match of a ranked list of vhosts.
:param list matches: list of dicts containing the vhost, the matching name,
and the numerical rank
:returns: the most matching vhost
:rtype: :class:`~certbot_nginx._internal.obj.VirtualHost`
"""
if not matches:
return None
elif matches[0]['rank'] in [START_WILDCARD_RANK, END_WILDCARD_RANK,
START_WILDCARD_RANK + NO_SSL_MODIFIER, END_WILDCARD_RANK + NO_SSL_MODIFIER]:
# Wildcard match - need to find the longest one
rank = matches[0]['rank']
wildcards = [x for x in matches if x['rank'] == rank]
return max(wildcards, key=lambda x: len(x['name']))['vhost']
# Exact or regex match
return matches[0]['vhost']
def _rank_matches_by_name(self, vhost_list, target_name):
"""Returns a ranked list of vhosts from vhost_list that match target_name.
This method should always be followed by a call to _select_best_name_match.
:param list vhost_list: list of vhosts to filter and rank
:param str target_name: The name to match
:returns: list of dicts containing the vhost, the matching name, and
the numerical rank
:rtype: list
"""
# Nginx chooses a matching server name for a request with precedence:
# 1. exact name match
# 2. longest wildcard name starting with *
# 3. longest wildcard name ending with *
# 4. first matching regex in order of appearance in the file
matches = []
for vhost in vhost_list:
name_type, name = parser.get_best_match(target_name, vhost.names)
if name_type == 'exact':
matches.append({'vhost': vhost,
'name': name,
'rank': NAME_RANK})
elif name_type == 'wildcard_start':
matches.append({'vhost': vhost,
'name': name,
'rank': START_WILDCARD_RANK})
elif name_type == 'wildcard_end':
matches.append({'vhost': vhost,
'name': name,
'rank': END_WILDCARD_RANK})
elif name_type == 'regex':
matches.append({'vhost': vhost,
'name': name,
'rank': REGEX_RANK})
return sorted(matches, key=lambda x: x['rank'])
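    # Illustrative ranking (hypothetical server names): for target_name
    # "www.example.com", a vhost with server_name "www.example.com" gets
    # NAME_RANK (0), "*.example.com" gets START_WILDCARD_RANK (1) and
    # "www.example.*" gets END_WILDCARD_RANK (2); _select_best_name_match()
    # then takes the lowest-ranked match.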
def _rank_matches_by_name_and_ssl(self, vhost_list, target_name):
"""Returns a ranked list of vhosts from vhost_list that match target_name.
The ranking gives preference to SSLishness before name match level.
:param list vhost_list: list of vhosts to filter and rank
:param str target_name: The name to match
:returns: list of dicts containing the vhost, the matching name, and
the numerical rank
:rtype: list
"""
matches = self._rank_matches_by_name(vhost_list, target_name)
for match in matches:
if not match['vhost'].ssl:
match['rank'] += NO_SSL_MODIFIER
return sorted(matches, key=lambda x: x['rank'])
def choose_redirect_vhosts(self, target_name: str, port: str) -> List[obj.VirtualHost]:
        """Chooses virtual hosts for redirect enhancement.
        Chooses the vhosts most closely matching target_name that are
        listening on port without using ssl.
.. todo:: This should maybe return list if no obvious answer
is presented.
.. todo:: The special name "$hostname" corresponds to the machine's
hostname. Currently we just ignore this.
:param str target_name: domain name
:param str port: port number
:returns: vhosts associated with name
:rtype: list of :class:`~certbot_nginx._internal.obj.VirtualHost`
"""
if util.is_wildcard_domain(target_name):
# Ask user which VHosts to enhance.
vhosts = self._choose_vhosts_wildcard(target_name, prefer_ssl=False,
no_ssl_filter_port=port)
else:
matches = self._get_redirect_ranked_matches(target_name, port)
            vhosts = [x for x in [self._select_best_name_match(matches)] if x is not None]
return vhosts
def choose_auth_vhosts(self, target_name: str) -> Tuple[List[obj.VirtualHost],
List[obj.VirtualHost]]:
"""Returns a list of HTTP and HTTPS vhosts with a server_name matching target_name.
If no HTTP vhost exists, one will be cloned from the default vhost. If that fails, no HTTP
vhost will be returned.
:param str target_name: non-wildcard domain name
:returns: tuple of HTTP and HTTPS virtualhosts
:rtype: tuple of :class:`~certbot_nginx._internal.obj.VirtualHost`
"""
vhosts = [m['vhost'] for m in self._get_ranked_matches(target_name) if m and 'vhost' in m]
http_vhosts = [vh for vh in vhosts if
self._vhost_listening(vh, str(self.config.http01_port), False)]
https_vhosts = [vh for vh in vhosts if
self._vhost_listening(vh, str(self.config.https_port), True)]
        # If no HTTP vhost matches, try to create one from the default_server on http01_port.
if not http_vhosts:
try:
http_vhosts = [self._vhost_from_duplicated_default(target_name, False,
str(self.config.http01_port))]
except errors.MisconfigurationError:
http_vhosts = []
return http_vhosts, https_vhosts
def _port_matches(self, test_port: str, matching_port: str) -> bool:
        # test_port is a number, matching_port is a number or "" or None
if matching_port == "" or matching_port is None:
# if no port is specified, Nginx defaults to listening on port 80.
return test_port == self.DEFAULT_LISTEN_PORT
return test_port == matching_port
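    # Illustrative behaviour of the defaulting rule above:
    #   _port_matches("80", "")    -> True  (no port given defaults to 80)
    #   _port_matches("80", None)  -> True
    #   _port_matches("443", "")   -> False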
def _vhost_listening(self, vhost: obj.VirtualHost, port: str, ssl: bool) -> bool:
"""Tests whether a vhost has an address listening on a port with SSL enabled or disabled.
:param `obj.VirtualHost` vhost: The vhost whose addresses will be tested
:param port str: The port number as a string that the address should be bound to
:param bool ssl: Whether SSL should be enabled or disabled on the address
:returns: Whether the vhost has an address listening on the port and protocol.
:rtype: bool
"""
assert self.parser is not None # prepare should already have been called here
# if the 'ssl on' directive is present on the vhost, all its addresses have SSL enabled
all_addrs_are_ssl = self.parser.has_ssl_on_directive(vhost)
# if we want ssl vhosts: either 'ssl on' or 'addr.ssl' should be enabled
# if we want plaintext vhosts: neither 'ssl on' nor 'addr.ssl' should be enabled
        _ssl_matches = lambda addr: (addr.ssl or all_addrs_are_ssl) if ssl else \
            (not addr.ssl and not all_addrs_are_ssl)
# if there are no listen directives at all, Nginx defaults to
# listening on port 80.
if not vhost.addrs:
return port == self.DEFAULT_LISTEN_PORT and ssl == all_addrs_are_ssl
return any(self._port_matches(port, addr.get_port()) and _ssl_matches(addr)
for addr in vhost.addrs)
def _vhost_listening_on_port_no_ssl(self, vhost: obj.VirtualHost, port: str) -> bool:
return self._vhost_listening(vhost, port, False)
def _get_redirect_ranked_matches(self, target_name, port):
"""Gets a ranked list of plaintextish port-listening vhosts matching target_name
Filter all hosts for those listening on port without using ssl.
Rank by how well these match target_name.
:param str target_name: The name to match
:param str port: port number as a string
:returns: list of dicts containing the vhost, the matching name, and
the numerical rank
:rtype: list
"""
all_vhosts = self.parser.get_vhosts()
def _vhost_matches(vhost, port):
return self._vhost_listening_on_port_no_ssl(vhost, port)
matching_vhosts = [vhost for vhost in all_vhosts if _vhost_matches(vhost, port)]
return self._rank_matches_by_name(matching_vhosts, target_name)
def get_all_names(self):
"""Returns all names found in the Nginx Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names: Set[str] = set()
for vhost in self.parser.get_vhosts():
try:
vhost.names.remove("$hostname")
vhost.names.add(socket.gethostname())
except KeyError:
pass
all_names.update(vhost.names)
for addr in vhost.addrs:
host = addr.get_addr()
if common.hostname_regex.match(host):
# If it's a hostname, add it to the names.
all_names.add(host)
elif not common.private_ips_regex.match(host):
# If it isn't a private IP, do a reverse DNS lookup
try:
if addr.ipv6:
host = addr.get_ipv6_exploded()
socket.inet_pton(socket.AF_INET6, host)
else:
socket.inet_pton(socket.AF_INET, host)
all_names.add(socket.gethostbyaddr(host)[0])
except (socket.error, socket.herror, socket.timeout):
continue
return util.get_filtered_names(all_names)
def _get_snakeoil_paths(self):
"""Generate invalid certs that let us create ssl directives for Nginx"""
# TODO: generate only once
tmp_dir = os.path.join(self.config.work_dir, "snakeoil")
le_key = crypto_util.generate_key(
key_size=1024, key_dir=tmp_dir, keyname="key.pem",
strict_permissions=self.config.strict_permissions)
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM, le_key.pem)
cert = acme_crypto_util.gen_ss_cert(key, domains=[socket.gethostname()])
cert_pem = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, cert)
cert_file, cert_path = util.unique_file(
os.path.join(tmp_dir, "cert.pem"), mode="wb")
with cert_file:
cert_file.write(cert_pem)
return cert_path, le_key.file
def _make_server_ssl(self, vhost):
"""Make a server SSL.
Make a server SSL by adding new listen and SSL directives.
:param vhost: The vhost to add SSL to.
:type vhost: :class:`~certbot_nginx._internal.obj.VirtualHost`
"""
https_port = self.config.https_port
ipv6info = self.ipv6_info(https_port)
ipv6_block = ['']
ipv4_block = ['']
# If the vhost was implicitly listening on the default Nginx port,
# have it continue to do so.
if not vhost.addrs:
listen_block = [['\n ', 'listen', ' ', self.DEFAULT_LISTEN_PORT]]
self.parser.add_server_directives(vhost, listen_block)
if vhost.ipv6_enabled():
ipv6_block = ['\n ',
'listen',
' ',
'[::]:{0}'.format(https_port),
' ',
'ssl']
if not ipv6info[1]:
# ipv6only=on is absent in global config
ipv6_block.append(' ')
ipv6_block.append('ipv6only=on')
if vhost.ipv4_enabled():
ipv4_block = ['\n ',
'listen',
' ',
'{0}'.format(https_port),
' ',
'ssl']
snakeoil_cert, snakeoil_key = self._get_snakeoil_paths()
ssl_block = ([
ipv6_block,
ipv4_block,
['\n ', 'ssl_certificate', ' ', snakeoil_cert],
['\n ', 'ssl_certificate_key', ' ', snakeoil_key],
['\n ', 'include', ' ', self.mod_ssl_conf],
['\n ', 'ssl_dhparam', ' ', self.ssl_dhparams],
])
self.parser.add_server_directives(
vhost, ssl_block)
##################################
# enhancement methods (Installer)
##################################
def supported_enhancements(self):
"""Returns currently supported enhancements."""
return ['redirect', 'ensure-http-header', 'staple-ocsp']
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~certbot.plugins.enhancements.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~certbot.plugins.enhancements.ENHANCEMENTS`
documentation for appropriate parameter.
"""
try:
return self._enhance_func[enhancement](domain, options)
except (KeyError, ValueError):
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
def _has_certbot_redirect(self, vhost, domain):
test_redirect_block = _test_block_from_block(_redirect_block_for_domain(domain))
return vhost.contains_list(test_redirect_block)
def _set_http_header(self, domain, header_substring):
"""Enables header identified by header_substring on domain.
If the vhost is listening plaintextishly, separates out the relevant
directives into a new server block, and only add header directive to
HTTPS block.
:param str domain: the domain to enable header for.
:param str header_substring: String to uniquely identify a header.
e.g. Strict-Transport-Security, Upgrade-Insecure-Requests
:returns: Success
:raises .errors.PluginError: If no viable HTTPS host can be created or
set with header header_substring.
"""
        if header_substring not in constants.HEADER_ARGS:
raise errors.NotSupportedError(
f"{header_substring} is not supported by the nginx plugin.")
vhosts = self.choose_vhosts(domain)
if not vhosts:
raise errors.PluginError(
"Unable to find corresponding HTTPS host for enhancement.")
for vhost in vhosts:
if vhost.has_header(header_substring):
raise errors.PluginEnhancementAlreadyPresent(
"Existing %s header" % (header_substring))
# if there is no separate SSL block, break the block into two and
# choose the SSL block.
if vhost.ssl and any(not addr.ssl for addr in vhost.addrs):
_, vhost = self._split_block(vhost)
header_directives = [
['\n ', 'add_header', ' ', header_substring, ' '] +
constants.HEADER_ARGS[header_substring],
['\n']]
self.parser.add_server_directives(vhost, header_directives)
def _add_redirect_block(self, vhost, domain):
"""Add redirect directive to vhost
"""
redirect_block = _redirect_block_for_domain(domain)
self.parser.add_server_directives(
vhost, redirect_block, insert_at_top=True)
def _split_block(self, vhost, only_directives=None):
"""Splits this "virtual host" (i.e. this nginx server block) into
separate HTTP and HTTPS blocks.
:param vhost: The server block to break up into two.
:param list only_directives: If this exists, only duplicate these directives
when splitting the block.
:type vhost: :class:`~certbot_nginx._internal.obj.VirtualHost`
:returns: tuple (http_vhost, https_vhost)
:rtype: tuple of type :class:`~certbot_nginx._internal.obj.VirtualHost`
"""
http_vhost = self.parser.duplicate_vhost(vhost, only_directives=only_directives)
def _ssl_match_func(directive):
return 'ssl' in directive
def _ssl_config_match_func(directive):
return self.mod_ssl_conf in directive
def _no_ssl_match_func(directive):
return 'ssl' not in directive
# remove all ssl addresses and related directives from the new block
for directive in self.SSL_DIRECTIVES:
self.parser.remove_server_directives(http_vhost, directive)
self.parser.remove_server_directives(http_vhost, 'listen', match_func=_ssl_match_func)
self.parser.remove_server_directives(http_vhost, 'include',
match_func=_ssl_config_match_func)
# remove all non-ssl addresses from the existing block
self.parser.remove_server_directives(vhost, 'listen', match_func=_no_ssl_match_func)
return http_vhost, vhost
def _enable_redirect(self, domain, unused_options):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
If the vhost is listening plaintextishly, separate out the
relevant directives into a new server block and add a rewrite directive.
.. note:: This function saves the configuration
:param str domain: domain to enable redirect for
:param unused_options: Not currently used
:type unused_options: Not Available
"""
port = self.DEFAULT_LISTEN_PORT
# If there are blocks listening plaintextishly on self.DEFAULT_LISTEN_PORT,
# choose the most name-matching one.
vhosts = self.choose_redirect_vhosts(domain, port)
if not vhosts:
logger.info("No matching insecure server blocks listening on port %s found.",
self.DEFAULT_LISTEN_PORT)
return
for vhost in vhosts:
self._enable_redirect_single(domain, vhost)
def _enable_redirect_single(self, domain, vhost):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
If the vhost is listening plaintextishly, separate out the
relevant directives into a new server block and add a rewrite directive.
.. note:: This function saves the configuration
:param str domain: domain to enable redirect for
:param `~obj.Vhost` vhost: vhost to enable redirect for
"""
if vhost.ssl:
http_vhost, _ = self._split_block(vhost, ['listen', 'server_name'])
# Add this at the bottom to get the right order of directives
return_404_directive = [['\n ', 'return', ' ', '404']]
self.parser.add_server_directives(http_vhost, return_404_directive)
vhost = http_vhost
if self._has_certbot_redirect(vhost, domain):
logger.info("Traffic on port %s already redirecting to ssl in %s",
self.DEFAULT_LISTEN_PORT, vhost.filep)
else:
# Redirect plaintextish host to https
self._add_redirect_block(vhost, domain)
logger.info("Redirecting all traffic on port %s to ssl in %s",
self.DEFAULT_LISTEN_PORT, vhost.filep)
def _enable_ocsp_stapling(self, domain, chain_path):
"""Include OCSP response in TLS handshake
:param str domain: domain to enable OCSP response for
:param chain_path: chain file path
:type chain_path: `str` or `None`
"""
vhosts = self.choose_vhosts(domain)
for vhost in vhosts:
self._enable_ocsp_stapling_single(vhost, chain_path)
def _enable_ocsp_stapling_single(self, vhost, chain_path):
"""Include OCSP response in TLS handshake
:param str vhost: vhost to enable OCSP response for
:param chain_path: chain file path
:type chain_path: `str` or `None`
"""
if self.version < (1, 3, 7):
raise errors.PluginError("Version 1.3.7 or greater of nginx "
"is needed to enable OCSP stapling")
if chain_path is None:
raise errors.PluginError(
"--chain-path is required to enable "
"Online Certificate Status Protocol (OCSP) stapling "
"on nginx >= 1.3.7.")
stapling_directives = [
['\n ', 'ssl_trusted_certificate', ' ', chain_path],
['\n ', 'ssl_stapling', ' ', 'on'],
['\n ', 'ssl_stapling_verify', ' ', 'on'], ['\n']]
try:
self.parser.add_server_directives(vhost,
stapling_directives)
except errors.MisconfigurationError as error:
logger.debug(str(error))
raise errors.PluginError("An error occurred while enabling OCSP "
"stapling for {0}.".format(vhost.names))
self.save_notes += ("OCSP Stapling was enabled "
"on SSL Vhost: {0}.\n".format(vhost.filep))
self.save_notes += "\tssl_trusted_certificate {0}\n".format(chain_path)
self.save_notes += "\tssl_stapling on\n"
self.save_notes += "\tssl_stapling_verify on\n"
######################################
# Nginx server management (Installer)
######################################
def restart(self):
"""Restarts nginx server.
        :raises .errors.MisconfigurationError: If the reload fails.
"""
nginx_restart(self.conf('ctl'), self.nginx_conf, self.conf('sleep-seconds'))
def config_test(self):
"""Check the configuration of Nginx for errors.
:raises .errors.MisconfigurationError: If config_test fails
"""
try:
util.run_script([self.conf('ctl'), "-c", self.nginx_conf, "-t"])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def _nginx_version(self):
"""Return results of nginx -V
:returns: version text
:rtype: str
:raises .PluginError:
Unable to run Nginx version command
"""
try:
proc = subprocess.run(
[self.conf('ctl'), "-c", self.nginx_conf, "-V"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
check=False,
env=util.env_no_snap_for_external_calls())
text = proc.stderr # nginx prints output to stderr
except (OSError, ValueError) as error:
logger.debug(str(error), exc_info=True)
raise errors.PluginError(
"Unable to run %s -V" % self.conf('ctl'))
return text
def get_version(self):
"""Return version of Nginx Server.
Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError:
Unable to find Nginx version or version is unsupported
"""
text = self._nginx_version()
version_regex = re.compile(r"nginx version: ([^/]+)/([0-9\.]*)", re.IGNORECASE)
version_matches = version_regex.findall(text)
sni_regex = re.compile(r"TLS SNI support enabled", re.IGNORECASE)
sni_matches = sni_regex.findall(text)
ssl_regex = re.compile(r" --with-http_ssl_module")
ssl_matches = ssl_regex.findall(text)
if not version_matches:
raise errors.PluginError("Unable to find Nginx version")
if not ssl_matches:
raise errors.PluginError(
"Nginx build is missing SSL module (--with-http_ssl_module).")
if not sni_matches:
raise errors.PluginError("Nginx build doesn't support SNI")
product_name, product_version = version_matches[0]
if product_name != 'nginx':
logger.warning("NGINX derivative %s is not officially supported by"
" certbot", product_name)
nginx_version = tuple(int(i) for i in product_version.split("."))
# nginx < 0.8.48 uses machine hostname as default server_name instead of
# the empty string
if nginx_version < (0, 8, 48):
raise errors.NotSupportedError("Nginx version must be 0.8.48+")
return nginx_version
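    # Illustrative example: "-V" output containing the line
    #   nginx version: nginx/1.21.6
    # together with "--with-http_ssl_module" and "TLS SNI support enabled"
    # yields the tuple (1, 21, 6); the version shown is hypothetical.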
def _get_openssl_version(self):
"""Return version of OpenSSL linked to Nginx.
Version is returned as string. If no version can be found, empty string is returned.
:returns: openssl_version
:rtype: str
:raises .PluginError:
Unable to run Nginx version command
"""
text = self._nginx_version()
matches = re.findall(r"running with OpenSSL ([^ ]+) ", text)
if not matches:
matches = re.findall(r"built with OpenSSL ([^ ]+) ", text)
if not matches:
logger.warning("NGINX configured with OpenSSL alternatives is not officially"
" supported by Certbot.")
return ""
return matches[0]
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Nginx to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.config_root,
version=".".join(str(i) for i in self.version))
)
def auth_hint(self, failed_achalls): # pragma: no cover
return (
"The Certificate Authority failed to verify the temporary nginx configuration changes "
"made by Certbot. Ensure the listed domains point to this nginx server and that it is "
"accessible from the internet."
)
###################################################
# Wrapper functions for Reverter class (Installer)
###################################################
def save(self, title=None, temporary=False):
"""Saves all changes to the configuration files.
:param str title: The title of the save. If a title is given, the
configuration will be saved as a new checkpoint and put in a
timestamped directory.
:param bool temporary: Indicates whether the changes made will
be quickly reversed in the future (ie. challenges)
:raises .errors.PluginError: If there was an error in
an attempt to save the configuration, or an error creating a
checkpoint
"""
save_files = set(self.parser.parsed.keys())
self.add_to_checkpoint(save_files, self.save_notes, temporary)
self.save_notes = ""
# Change 'ext' to something else to not override existing conf files
self.parser.filedump(ext='')
if title and not temporary:
self.finalize_checkpoint(title)
def recovery_routine(self):
"""Revert all previously modified files.
Reverts all modified files that have not been saved as a checkpoint
:raises .errors.PluginError: If unable to recover the configuration
"""
super().recovery_routine()
self.new_vhost = None
self.parser.load()
def revert_challenge_config(self):
"""Used to cleanup challenge configurations.
:raises .errors.PluginError: If unable to revert the challenge config.
"""
self.revert_temporary_config()
self.new_vhost = None
self.parser.load()
def rollback_checkpoints(self, rollback=1):
"""Rollback saved checkpoints.
:param int rollback: Number of checkpoints to revert
:raises .errors.PluginError: If there is a problem with the input or
the function is unable to correctly revert the configuration
"""
super().rollback_checkpoints(rollback)
self.new_vhost = None
self.parser.load()
###########################################################################
# Challenges Section for Authenticator
###########################################################################
def get_chall_pref(self, unused_domain):
"""Return list of challenge preferences."""
return [challenges.HTTP01]
# Entry point in main.py for performing challenges
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
        If this turns out not to be the case in the future, cleanup and
        outstanding challenges will have to be designed better.
"""
self._chall_out += len(achalls)
responses = [None] * len(achalls)
http_doer = http_01.NginxHttp01(self)
for i, achall in enumerate(achalls):
# Currently also have chall_doer hold associated index of the
# challenge. This helps to put all of the responses back together
# when they are all complete.
http_doer.add_chall(achall, i)
http_response = http_doer.perform()
# Must restart in order to activate the challenges.
# Handled here because we may be able to load up other challenge types
self.restart()
# Go through all of the challenges and assign them to the proper place
# in the responses return value. All responses must be in the same order
# as the original challenges.
for i, resp in enumerate(http_response):
responses[http_doer.indices[i]] = resp
return responses
# called after challenges are performed
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out -= len(achalls)
# If all of the challenges have been finished, clean up everything
if self._chall_out <= 0:
self.revert_challenge_config()
self.restart()
def _test_block_from_block(block):
test_block = nginxparser.UnspacedList(block)
parser.comment_directive(test_block, 0)
return test_block[:-1]
def _redirect_block_for_domain(domain):
updated_domain = domain
match_symbol = '='
if util.is_wildcard_domain(domain):
match_symbol = '~'
updated_domain = updated_domain.replace('.', r'\.')
updated_domain = updated_domain.replace('*', '[^.]+')
updated_domain = '^' + updated_domain + '$'
redirect_block = [[
['\n ', 'if', ' ', '($host', ' ', match_symbol, ' ', '%s)' % updated_domain, ' '],
[['\n ', 'return', ' ', '301', ' ', 'https://$host$request_uri'],
'\n ']],
['\n']]
return redirect_block
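# Illustrative result of the helper above: for the hypothetical domain
# "example.com" the returned nested lists correspond to the nginx snippet
#     if ($host = example.com) {
#         return 301 https://$host$request_uri;
#     }
# while a wildcard such as "*.example.com" matches with "~" against the
# regex "^[^.]+\.example\.com$".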
def nginx_restart(nginx_ctl, nginx_conf, sleep_duration):
"""Restarts the Nginx Server.
.. todo:: Nginx restart is fatal if the configuration references
non-existent SSL cert/key files. Remove references to /etc/letsencrypt
before restart.
:param str nginx_ctl: Path to the Nginx binary.
:param str nginx_conf: Path to the Nginx configuration file.
:param int sleep_duration: How long to sleep after sending the reload signal.
"""
try:
reload_output: Text = u""
with tempfile.TemporaryFile() as out:
proc = subprocess.run([nginx_ctl, "-c", nginx_conf, "-s", "reload"],
env=util.env_no_snap_for_external_calls(),
stdout=out, stderr=out, check=False)
out.seek(0)
reload_output = out.read().decode("utf-8")
if proc.returncode != 0:
logger.debug("nginx reload failed:\n%s", reload_output)
            # Maybe Nginx isn't running - try to start it
# Write to temporary files instead of piping because of communication issues on Arch
# https://github.com/certbot/certbot/issues/4324
with tempfile.TemporaryFile() as out:
nginx_proc = subprocess.run([nginx_ctl, "-c", nginx_conf],
stdout=out, stderr=out, env=util.env_no_snap_for_external_calls(), check=False)
if nginx_proc.returncode != 0:
out.seek(0)
# Enter recovery routine...
raise errors.MisconfigurationError(
"nginx restart failed:\n%s" % out.read().decode("utf-8"))
except (OSError, ValueError):
raise errors.MisconfigurationError("nginx restart failed")
# Nginx can take a significant duration of time to fully apply a new config, depending
# on size and contents (https://github.com/certbot/certbot/issues/7422). Lacking a way
# to reliably identify when this process is complete, we provide the user with control
# over how long Certbot will sleep after reloading the configuration.
if sleep_duration > 0:
time.sleep(sleep_duration)
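# Minimal usage sketch of nginx_restart (illustrative only; the binary and
# configuration paths below are assumptions for a typical layout, not values
# taken from this module):
#
#   nginx_restart("/usr/sbin/nginx", "/etc/nginx/nginx.conf", sleep_duration=1)
#
# A non-zero sleep_duration gives nginx time to finish applying the new
# configuration before Certbot continues with validation.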
def _determine_default_server_root():
if os.environ.get("CERTBOT_DOCS") == "1":
default_server_root = "%s or %s" % (constants.LINUX_SERVER_ROOT,
constants.FREEBSD_DARWIN_SERVER_ROOT)
else:
default_server_root = constants.CLI_DEFAULTS["server_root"]
return default_server_root
| {
"content_hash": "ffab54011eb96465e66ec7a7d02f9cdd",
"timestamp": "",
"source": "github",
"line_count": 1272,
"max_line_length": 118,
"avg_line_length": 40.9622641509434,
"alnum_prop": 0.5971326577614002,
"repo_name": "stweil/letsencrypt",
"id": "ac24edb0af6a7fc5ad0501e820b7c6f8de96279d",
"size": "52137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "certbot-nginx/certbot_nginx/_internal/configurator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Augeas",
"bytes": "4997"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4270"
},
{
"name": "Python",
"bytes": "1355274"
},
{
"name": "Shell",
"bytes": "120566"
},
{
"name": "Standard ML",
"bytes": "256"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from ..views import StatusChangedNotificationView
urlpatterns = patterns('',
url(r'^$', StatusChangedNotificationView.as_view(), name='cowry-docdata-status-changed'),
)
| {
"content_hash": "00bb1f1cae276d5280a6a0f5b22a2154",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 93,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.7649769585253456,
"repo_name": "onepercentclub/onepercentclub-site",
"id": "b61f4c2bd17b12c9addfe97107002cabc2e84051",
"size": "217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/cowry_docdata/urls/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "13896"
},
{
"name": "CSS",
"bytes": "351343"
},
{
"name": "HTML",
"bytes": "898027"
},
{
"name": "Handlebars",
"bytes": "246489"
},
{
"name": "JavaScript",
"bytes": "168884"
},
{
"name": "Python",
"bytes": "1511371"
},
{
"name": "Ruby",
"bytes": "1050"
},
{
"name": "Shell",
"bytes": "74046"
}
],
"symlink_target": ""
} |
import sys
from PyQt5.QtWidgets import *
# Ui_Form is assumed to come from a pyuic5-generated module; "ui_form" below is
# a placeholder module name, not taken from the original project.
from ui_form import Ui_Form
if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = QWidget()
    ui = Ui_Form()
    ui.setupUi(w)
    w.show()
    sys.exit(app.exec_())
| {
"content_hash": "daffee0796688b2f30754406a77ff6af",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 32,
"avg_line_length": 17.5,
"alnum_prop": 0.5785714285714286,
"repo_name": "Great-Li-Xin/PythonDev",
"id": "0b79faf425d8915d19dcd89a9c34227dd892663a",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QT-Cryption/cryption.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "15519"
},
{
"name": "ColdFusion",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "9727"
},
{
"name": "Java",
"bytes": "9296"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "PHP",
"bytes": "8114"
},
{
"name": "Python",
"bytes": "487811"
}
],
"symlink_target": ""
} |
try:
# Python 2.7
from collections import OrderedDict
except ImportError:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template settings
All settings which are to configure a specific template are located
here. Deployers should ideally not need to edit any other files outside
of their template folder.
"""
T = current.T
# Pre-Populate
# http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/PrePopulate
# Configure/disable pre-population of the database.
# To pre-populate the database On 1st run should specify directory(s) in
# /modules/templates/
# eg:
# ["default"] (1 is a shortcut for this)
# ["default", "default/users"]
# Unless doing a manual DB migration, where prepopulate = 0
# In Production, prepopulate = 0 (to save 1x DAL hit every page)
#settings.base.prepopulate = 1
# Theme (folder to use for views/layout.html)
#settings.base.theme = "default"
# Enable Guided Tours
settings.base.guided_tour = True
# Authentication settings
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
#settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
# Allow a new user to be linked to a record (and a new record will be created if it doesn't already exist)
#settings.auth.registration_link_user_to = {"staff":T("Staff"),
# "volunteer":T("Volunteer"),
# "member":T("Member")}
# Always notify the approver of a new (verified) user, even if the user is automatically approved
#settings.auth.always_notify_approver = False
# The name of the teams that users are added to when they opt-in to receive alerts
#settings.auth.opt_in_team_list = ["Updates"]
# Uncomment this to set the opt in default to True
#settings.auth.opt_in_default = True
# Uncomment this to request the Home Phone when a user registers
#settings.auth.registration_requests_home_phone = True
# Uncomment this to request the Mobile Phone when a user registers
#settings.auth.registration_requests_mobile_phone = True
# Uncomment this to have the Mobile Phone selection during registration be mandatory
#settings.auth.registration_mobile_phone_mandatory = True
# Uncomment this to request the Organisation when a user registers
#settings.auth.registration_requests_organisation = True
# Uncomment this to have the Organisation selection during registration be mandatory
#settings.auth.registration_organisation_required = True
# Uncomment this to have the Organisation input hidden unless the user enters a non-whitelisted domain
#settings.auth.registration_organisation_hidden = True
# Uncomment this to default the Organisation during registration
#settings.auth.registration_organisation_default = "My Organisation"
# Uncomment this to request the Organisation Group when a user registers
#settings.auth.registration_requests_organisation_group = True
# Uncomment this to have the Organisation Group selection during registration be mandatory
#settings.auth.registration_organisation_group_required = True
# Uncomment this to request the Site when a user registers
#settings.auth.registration_requests_site = True
# Uncomment this to allow Admin to see Organisations in User Admin even if the Registration doesn't request this
#settings.auth.admin_sees_organisation = True
# Uncomment to hide the UTC Offset in Registration/Profile
#settings.auth.show_utc_offset = False
# Uncomment to set the default role UUIDs assigned to newly-registered users
# This is a dictionary of lists, where the key is the realm that the list of roles applies to
# The key 0 implies not realm restricted
# The keys "organisation_id" and "site_id" can be used to indicate the user's "organisation_id" and "site_id"
#settings.auth.registration_roles = { 0: ["STAFF", "PROJECT_EDIT"]}
# Define which entity types to use as realm entities in role manager
#settings.auth.realm_entity_types = ("org_organisation",)
# Uncomment to activate entity role manager tabs for OrgAdmins
#settings.auth.entity_role_manager = True
# Define modules for entity role manager
#settings.auth.role_modules = OrderedDict([])
# Define access levels for entity role manager
#settings.auth.access_levels = OrderedDict([])
# Uncomment this to enable record approval
#settings.auth.record_approval = True
# Uncomment this and specify a list of tablenames for which record approval is required
#settings.auth.record_approval_required_for = ("org_organisation",)
# Uncomment this to request an image when users register
#settings.auth.registration_requests_image = True
# Uncomment this to direct newly-registered users to their volunteer page to be able to add extra details
# NB This requires Verification/Approval to be Off
# @ToDo: Extend to all optional Profile settings: Homepage, Twitter, Facebook, Mobile Phone, Image
#settings.auth.registration_volunteer = True
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
#settings.auth.terms_of_service = True
# Uncomment this to allow users to Login using Gmail's SMTP
#settings.auth.gmail_domains = ["gmail.com"]
# Uncomment this to allow users to Login using OpenID
#settings.auth.openid = True
# Uncomment this to enable presence records on login based on HTML5 geolocations
#settings.auth.set_presence_on_login = True
# Uncomment this and specify a list of location levels to be ignored by presence records
#settings.auth.ignore_levels_for_presence = ("L0", "L1", "L2", "L3")
# Uncomment this to enable the creation of new locations if a user logs in from an unknown location. Warning: This may lead to many useless location entries
#settings.auth.create_unknown_locations = True
# L10n settings
# Languages used in the deployment (used for Language Toolbar & GIS Locations)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
#settings.L10n.languages = OrderedDict([
# ("ar", "العربية"),
# ("bs", "Bosanski"),
# ("en", "English"),
# ("fr", "Français"),
# ("de", "Deutsch"),
# ("el", "ελληνικά"),
# ("es", "Español"),
# ("it", "Italiano"),
# ("ja", "日本語"),
# ("km", "ភាសាខ្មែរ"),
# ("ko", "한국어"),
# ("mn", "Монгол хэл"), # Mongolian
# ("my", "မြန်မာစာ"), # Burmese
# ("ne", "नेपाली"), # Nepali
# ("prs", "دری"), # Dari
# ("ps", "پښتو"), # Pashto
# ("pt", "Português"),
# ("pt-br", "Português (Brasil)"),
# ("ru", "русский"),
# ("tet", "Tetum"),
# ("tl", "Tagalog"),
# ("tr", "Türkçe"),
# ("ur", "اردو"),
# ("vi", "Tiếng Việt"),
# ("zh-cn", "中文 (简体)"),
# ("zh-tw", "中文 (繁體)"),
#])
# Default language for Language Toolbar (& GIS Locations in future)
#settings.L10n.default_language = "en"
# Uncomment to Hide the language toolbar
#settings.L10n.display_toolbar = False
# Default timezone for users
#settings.L10n.utc_offset = "+0000"
# Uncomment these to use US-style dates in English
#settings.L10n.date_format = "%m-%d-%Y"
#settings.L10n.time_format = "%H:%M:%S"
# Start week on Sunday
#settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
#settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
#settings.L10n.default_country_code = 1
# Make last name in person/user records mandatory
#settings.L10n.mandatory_lastname = True
# Configure the list of Religions
#settings.L10n.religions = {"none": T("none"),
#"christian": T("Christian"),
#"muslim": T("Muslim"),
#"jewish": T("Jewish"),
#"buddhist": T("Buddhist"),
#"hindu": T("Hindu"),
#"bahai": T("Bahai"),
#"other": T("other")
#}
# Uncomment this to Translate CMS Series Names
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Layer Names
#settings.L10n.translate_gis_layer = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Uncomment this for Alternate Location Names
#settings.L10n.name_alt_gis_location = True
# Uncomment this to Translate Organisation Names/Acronyms
#settings.L10n.translate_org_organisation = True
# Finance settings
#settings.fin.currencies = {
# "EUR" : T("Euros"),
# "GBP" : T("Great British Pounds"),
# "USD" : T("United States Dollars"),
#}
#settings.fin.currency_default = "USD"
#settings.fin.currency_writable = False # False currently breaks things
# PDF settings
# Default page size for reports (defaults to A4)
#settings.base.paper_size = T("Letter")
# Location of Logo used in pdfs headers
#settings.ui.pdf_logo = "static/img/mylogo.png"
# GIS (Map) settings
# Size of the Embedded Map
# Change this if-required for your theme
# NB API can override this in specific modules
#settings.gis.map_height = 600
#settings.gis.map_width = 1000
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
#settings.gis.countries = ("US",)
# Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon
#settings.gis.geocode_imported_addresses = "google"
# Hide the Map-based selection tool in the Location Selector
#settings.gis.map_selector = False
# Show LatLon boxes in the Location Selector
#settings.gis.latlon_selector = True
# Use Building Names as a separate field in Street Addresses?
#settings.gis.building_name = False
# Use a non-default fillColor for Clustered points
#settings.gis.cluster_fill = "8087ff"
# Disable the label for clustered points
#settings.gis.cluster_label = False
# Use a non-default strokeColor for Clustered points
#settings.gis.cluster_stroke = "2b2f76"
# Use a non-default fillColor for Selected points
#settings.gis.select_fill = "ffdc33"
# Use a non-default strokeColor for Selected points
#settings.gis.select_stroke = "ff9933"
# Display Resources recorded to Admin-Level Locations on the map
# @ToDo: Move into gis_config?
# Uncomment to fall back to country LatLon to show resources, if nothing better available
#settings.gis.display_L0 = True
# Currently unused
#settings.gis.display_L1 = False
    # Uncomment this to do deduplication lookups on Imports via PCode (as an alternative to Name)
#settings.gis.lookup_code = "PCode"
# Set this if there will be multiple areas in which work is being done,
# and a menu to select among them is wanted.
#settings.gis.menu = "Maps"
# Maximum Marker Size
# (takes effect only on display)
#settings.gis.marker_max_height = 35
#settings.gis.marker_max_width = 30
# Duplicate Features so that they show wrapped across the Date Line?
# Points only for now
# lon<0 have a duplicate at lon+360
# lon>0 have a duplicate at lon-360
#settings.gis.duplicate_features = True
# Uncomment to use CMS to provide Metadata on Map Layers
#settings.gis.layer_metadata = True
# Uncomment to show Clear Layers tool
#settings.gis.clear_layers = True
# Uncomment to hide the Geolocation control
#settings.gis.geolocate_control = False
# Uncomment to hide the WMS GetFeatureInfo control
#settings.gis.getfeature_control = False
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to hide the Base Layers folder in the LayerTree
#settings.gis.layer_tree_base = False
# Uncomment to hide the Overlays folder in the LayerTree
#settings.gis.layer_tree_overlays = False
# Uncomment to change the label of the Overlays folder in the LayerTree
#settings.gis.label_overlays = "Overlays"
# Uncomment to not expand the folders in the LayerTree by default
#settings.gis.layer_tree_expanded = False
# Uncomment to have custom folders in the LayerTree use Radio Buttons
#settings.gis.layer_tree_radio = True
# Uncomment to display the Map Legend as a floating DIV
#settings.gis.legend = "float"
# Uncomment to prevent showing LatLon in Location Represents
#settings.gis.location_represent_address_only = True
# Mouse Position: 'normal', 'mgrs' or None
#settings.gis.mouse_position = "mgrs"
# Uncomment to show the Navigation controls on the toolbar
#settings.gis.nav_controls = True
# Uncomment to hide the Overview map
#settings.gis.overview = False
# Uncomment to hide the permalink control
#settings.gis.permalink = False
# Resources which can be directly added to the main map
#settings.gis.poi_create_resources = None
    #settings.gis.poi_create_resources = [{"c": "event", "f": "incident_report", "table": "gis_poi", "label": T("Add Incident Report"), "tooltip": T("Add Incident Report"), "layer": "Incident Reports", "location": "popup"}]
# PoIs to export in KML/OSM feeds from Admin locations
#settings.gis.poi_export_resources = ["cr_shelter", "hms_hospital", "org_office"]
# Uncomment to show the Print control:
# http://eden.sahanafoundation.org/wiki/UserGuidelines/Admin/MapPrinting
#settings.gis.print_button = True
# Uncomment to save a screenshot whenever a saved map is saved
#settings.gis.config_screenshot = (820, 410)
# Uncomment to hide the Save control, or set to "float"
#settings.gis.save = False
# Uncomment to hide the ScaleLine control
#settings.gis.scaleline = False
# Uncomment to hide the GeoNames search box
#settings.gis.search_geonames = False
# Uncomment to modify the Simplify Tolerance
#settings.gis.simplify_tolerance = 0.001
# Uncomment to Hide the Toolbar from the main Map
#settings.gis.toolbar = False
# Uncomment to show Catalogue Layers in Map Widgets (e.g. Profile & Summary pages)
#settings.gis.widget_catalogue_layers = True
# Uncomment to show WMS Browser in Map Widgets (e.g. Profile & Summary pages)
# - NB This also requires the active gis_config to have one configured
#settings.gis.widget_wms_browser = True
# Uncomment to hide the Zoom control
#settings.gis.zoomcontrol = False
# Uncomment to open Location represent links in a Popup Window
#settings.gis.popup_location_link = True
# GeoNames username
settings.gis.geonames_username = "eden_test"
# Messaging Settings
# If you wish to use a parser.py in another folder than "default"
#settings.msg.parser = "mytemplatefolder"
# Uncomment to turn off enforcement of E.123 international phone number notation
#settings.msg.require_international_phone_numbers = False
# Uncomment to make basestation codes unique
#settings.msg.basestation_code_unique = True
# Use 'soft' deletes
#settings.security.archive_not_delete = False
# AAA Settings
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
#
#settings.security.policy = 7 # Organisation-ACLs
# Ownership-rule for records without owner:
# True = not owned by any user (strict ownership, default)
# False = owned by any authenticated user
#settings.security.strict_ownership = False
# Audit
# - can be a callable for custom hooks (return True to also perform normal logging, or False otherwise)
# NB Auditing (especially Reads) slows system down & consumes diskspace
#settings.security.audit_read = True
#settings.security.audit_write = True
# Lock-down access to Map Editing
#settings.security.map = True
# Allow non-MapAdmins to edit hierarchy locations? Defaults to True if not set.
# (Permissions can be set per-country within a gis_config)
#settings.gis.edit_Lx = False
# Allow non-MapAdmins to edit group locations? Defaults to False if not set.
#settings.gis.edit_GR = True
# Note that editing of locations used as regions for the Regions menu is always
# restricted to MapAdmins.
# Uncomment to disable that LatLons are within boundaries of their parent
#settings.gis.check_within_parent_boundaries = False
# Uncomment to Disable the Postcode selector in the LocationSelector
#settings.gis.postcode_selector = False
# Increase these if having scalability issues or slow connections
#settings.ui.autocomplete_delay = 800
#settings.ui.autocomplete_min_chars = 2
#settings.ui.filter_auto_submit = 800
#settings.ui.report_auto_submit = 800
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# Enable this to have Open links in IFrames open a full page in a new tab
#settings.ui.iframe_opens_full = True
# Enable this to change the label for 'Attachments' tabs
#settings.ui.label_attachments = "Attachments"
# Uncomment to configure the LocationSelector labels for the Map button with Points
#settings.label_locationselector_map_point_add = "Find on Map"
#settings.label_locationselector_map_point_view = "Find on Map"
# Enable this to change the label for 'Mobile Phone'
#settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
#settings.ui.label_postcode = "ZIP Code"
# Enable Social Media share buttons
#settings.ui.social_buttons = True
# Enable this to show pivot table options form by default
#settings.ui.hide_report_options = False
# Uncomment to show created_by/modified_by using Names not Emails
#settings.ui.auth_user_represent = "name"
# Uncomment to control the dataTables layout: https://datatables.net/reference/option/dom
# Default:
#settings.ui.datatables_dom = "fril<'dataTable_table't>pi"
# dataTables.Foundation.js would set to this:
#settings.ui.datatables_dom = "<'row'<'large-6 columns'l><'large-6 columns'f>r>t<'row'<'large-6 columns'i><'large-6 columns'p>>"
# Move the export_formats after the pagination control
#settings.ui.datatables_initComplete = '''$('.dataTables_paginate').after($('.dt-export-options'))'''
# Uncomment for dataTables to use a different paging style:
#settings.ui.datatables_pagingType = "bootstrap"
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ("kml", "pdf", "rss", "xls", "xml")
# Uncomment to change the label/class of FilterForm clear buttons
#settings.ui.filter_clear = "Clear"
# Uncomment to include an Interim Save button on CRUD forms
#settings.ui.interim_save = True
# Uncomment to enable icons on action buttons (requires corresponding CSS)
#settings.ui.use_button_icons = True
# Uncomment to use S3MultiSelectWidget on all dropdowns (currently the Auth Registration page & LocationSelectorWidget2 listen to this)
#settings.ui.multiselect_widget = True
# Theme for the S3HierarchyWidget
#settings.ui.hierarchy_theme = dict(css = "../themes/MYTHEME",
# icons = True,
# stripes = False,
# )
# Uncomment to show a default cancel button in standalone create/update forms
#settings.ui.default_cancel_button = True
# Uncomment to disable responsive behavior of datatables
#settings.ui.datatables_responsive = False
# Uncomment to modify the label of the Permalink
#settings.ui.label_permalink = "Permalink"
# -------------------------------------------------------------------------
# Asset
# Uncomment to have a specific asset type for Telephones
#settings.asset.telephones = True
# -------------------------------------------------------------------------
# CMS
# Uncomment this to hide CMS from module index pages
#settings.cms.hide_index = True
# Uncomment to use Bookmarks in Newsfeed
#settings.cms.bookmarks = True
    # Uncomment to have the Filter form in Newsfeed be open by default
#settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
#settings.cms.location_click_filters = True
# Uncomment to use Rich Text editor in Newsfeed
#settings.cms.richtext = True
# Uncomment to show Events in Newsfeed
#settings.cms.show_events = True
# Uncomment to hide Attachments in Newsfeed
#settings.cms.show_attachments = False
# Uncomment to show Links in Newsfeed
#settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
#settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
#settings.cms.show_titles = True
# Uncomment to use organisation_id instead of created_by in Newsfeed
#settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
#settings.cms.organisation_group = "created_by$org_group_id"
#settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
#settings.cms.person = "person_id"
# -------------------------------------------------------------------------
# Shelters
# Uncomment to use a dynamic population estimation by calculations based on registrations
#settings.cr.shelter_population_dynamic = True
# Uncomment to disable people registration in shelters
#settings.cr.people_registration = False
# -------------------------------------------------------------------------
# Events
# Make Event Types Hierarchical
#settings.event.types_hierarchical = True
# Make Incident Types Hierarchical
#settings.event.incident_types_hierarchical = True
# -------------------------------------------------------------------------
# Members
# Show a CV tab for Members
#settings.member.cv_tab = True
# -------------------------------------------------------------------------
# Persons
# Uncomment to allow person imports to match even without email addresses
#settings.pr.import_update_requires_email = False
# Uncomment this to enable support for third gender
#settings.pr.hide_third_gender = False
    # Uncomment to enable a fuzzy search for duplicates in the new AddPersonWidget2
#settings.pr.lookup_duplicates = True
# Uncomment to hide fields in S3AddPersonWidget[2]
#settings.pr.request_dob = False
#settings.pr.request_gender = False
# Uncomment to show field in S3AddPersonWidget
#settings.pr.request_home_phone = True
# Uncomment to modify the order of Names
#settings.pr.name_format = "%(last_name)s, %(first_name)s %(middle_name)s"
# Uncomment to prevent selecting existing users in the old S3AddPersonWidget
#settings.pr.select_existing = False
# Uncomment to prevent showing HR details in S3PersonAutocompleteWidget results
#settings.pr.search_shows_hr_details = False
# Uncomment to hide Emergency Contacts in Person Contacts page
#settings.pr.show_emergency_contacts = False
# Uncomment to hide the Address tab in person details
#settings.pr.use_address = False
# Show separate Public and Private Contacts Tabs
#settings.pr.contacts_tabs = ("public", "private")
# -------------------------------------------------------------------------
# Organisations
# Uncomment to use an Autocomplete for Organisation lookup fields
#settings.org.autocomplete = True
# Enable the Organisation Sector field
#settings.org.sector = True
# Enable the use of Organisation Branches
#settings.org.branches = True
# Show branches as tree rather than as table
#settings.org.branches_tree_view = True
# Make Facility Types Hierarchical
#settings.org.facility_types_hierarchical = True
# Enable the use of Organisation Groups & what their name is
#settings.org.groups = "Coalition"
#settings.org.groups = "Network"
# Organisation Location context
#settings.org.organisation_location_context = "organisation_location.location_id"
# Make Organisation Types Hierarchical
#settings.org.organisation_types_hierarchical = True
# Make Organisation Types Multiple
#settings.org.organisation_types_multiple = True
# Enable the use of Organisation Regions
#settings.org.regions = True
# Make Organisation Regions Hierarchical
#settings.org.regions_hierarchical = True
# Uncomment to show a Tab for Organisation Resources
#settings.org.resources_tab = True
# Make Services Hierarchical
#settings.org.services_hierarchical = True
    # Set the length of the auto-generated org/site code (the default is 10)
#settings.org.site_code_len = 3
# Set the label for Sites
#settings.org.site_label = "Facility"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
#settings.org.site_last_contacted = True
# Uncomment to use an Autocomplete for Site lookup fields
#settings.org.site_autocomplete = True
# Extra fields to search in Autocompletes & display in Representations
#settings.org.site_autocomplete_fields = ("instance_type", "location_id$L1", "location_id$addr_street", "organisation_id$name")
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = False
# Uncomment to allow Sites to be staffed by Volunteers
#settings.org.site_volunteers = True
# Uncomment to add summary fields for Organisations/Offices for # National/International staff
#settings.org.summary = True
# Enable certain fields just for specific Organisations
# Requires a call to settings.set_org_dependent_field(field)
# empty list => disabled for all (including Admin)
#settings.org.dependent_fields = \
# {#"<table name>.<field name>" : ["<Organisation Name>"],
# "pr_person_details.mother_name" : [],
# "pr_person_details.father_name" : [],
# "pr_person_details.company" : [],
# "pr_person_details.affiliations" : [],
# "vol_volunteer.active" : [],
# "vol_volunteer_cluster.vol_cluster_type_id" : [],
# "vol_volunteer_cluster.vol_cluster_id" : [],
# "vol_volunteer_cluster.vol_cluster_position_id" : [],
# }
# Uncomment to make Office codes unique
#settings.org.office_code_unique = True
# Uncomment to make Facility codes unique
#settings.org.facility_code_unique = True
# -------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
#settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
#settings.hrm.email_required = False
# Uncomment to allow Staff & Volunteers to be registered without an Organisation
#settings.hrm.org_required = False
    # Uncomment if there are only Staff & Volunteers from a single Organisation with no Branches
#settings.hrm.multiple_orgs = False
# Uncomment to disable the 'Send Message' action button
#settings.hrm.compose_button = False
# Uncomment to allow HR records to be deletable rather than just marking them as obsolete
#settings.hrm.deletable = True
# Uncomment to filter certificates by (root) Organisation & hence not allow Certificates from other orgs to be added to a profile (except by Admin)
#settings.hrm.filter_certificates = True
# Uncomment to allow HRs to have multiple Job Titles
#settings.hrm.multiple_job_titles = True
# Uncomment to have each root Org use a different Job Title Catalog
#settings.hrm.org_dependent_job_titles = True
# Uncomment to hide the Staff resource
#settings.hrm.show_staff = False
# Uncomment to have Staff use their Home Address as fallback if they have no Site defined
#settings.hrm.location_staff = ("site_id", "person_id")
# Uncomment to have Volunteers use their Site Address as fallback if they have no Home Address defined
#settings.hrm.location_vol = ("person_id", "site_id")
# Uncomment this to allow multiple site contacts per site (e.g. if needing a separate contact per sector)
#settings.hrm.site_contact_unique = False
# Uncomment to allow hierarchical categories of Skills, which each need their own set of competency levels.
#settings.hrm.skill_types = True
# Uncomment to disable Staff experience
#settings.hrm.staff_experience = False
# Uncomment to enable Volunteer 'active' field
# - can also be made a function which is called to calculate the status based on recorded hours
#settings.hrm.vol_active = True
# Uncomment to define a Tooltip to show when viewing the Volunteer 'active' field
#settings.hrm.vol_active_tooltip = "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year"
# Uncomment to disable Volunteer experience
#settings.hrm.vol_experience = False
# Uncomment to show the Organisation name in HR represents
#settings.hrm.show_organisation = True
# Uncomment to consolidate tabs into a single CV
#settings.hrm.cv_tab = True
# Uncomment to consolidate tabs into Staff Record (set to False to hide the tab)
#settings.hrm.record_tab = "record"
# Uncomment to disable the use of Volunteer Awards
#settings.hrm.use_awards = False
# Uncomment to disable the use of HR Certificates
#settings.hrm.use_certificates = False
# Uncomment to enable the use of Staff/Volunteer IDs
#settings.hrm.use_code = True
# Uncomment to disable the use of HR Credentials
#settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Description
#settings.hrm.use_description = False
# Uncomment to enable the use of HR Education
#settings.hrm.use_education = True
# Uncomment to disable the use of HR ID Tab
#settings.hrm.use_id = False
# Uncomment to disable the use of HR Address Tab
#settings.hrm.use_address = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to enable tracking of staff salaries
#settings.hrm.salary = True
# Uncomment to disable the use of HR Teams
#settings.hrm.teams = False
# Uncomment to disable the use of HR Trainings
#settings.hrm.use_trainings = False
# Uncomment this to configure tracking of internal/external training instructors
#settings.hrm.training_instructors = "external"
# Uncomment to use activity types in experience record, specify as {"code":"label", ...}
#settings.hrm.activity_types = {"rdrt": "RDRT Mission"}
# -------------------------------------------------------------------------
# Inventory Management
#settings.inv.collapse_tabs = False
# Uncomment to customise the label for Facilities in Inventory Management
#settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to have Warehouse Types be Organisation-dependent
#settings.inv.org_dependent_warehouse_types = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
#settings.inv.stock_count = True
# Use the term 'Order' instead of 'Shipment'
#settings.inv.shipment_name = "order"
# Uncomment to validate for Unique Warehouse Codes
#settings.inv.warehouse_code_unique = True
# Uncomment to not track pack values
#settings.inv.track_pack_values = False
#settings.inv.show_mode_of_transport = True
#settings.inv.send_show_org = False
#settings.inv.send_show_time_in = True
#settings.inv.send_form_name = "Tally Out Sheet"
#settings.inv.send_short_name = "TO"
#settings.inv.send_ref_field_name = "Tally Out Number"
#settings.inv.recv_form_name = "Acknowledgement Receipt for Donations Received Form"
#settings.inv.recv_shortname = "ARDR"
# Types common to both Send and Receive
#settings.inv.shipment_types = {
# 0: T("-"),
# 1: T("Other Warehouse"),
# 2: T("Donation"),
# 3: T("Foreign Donation"),
# 4: T("Local Purchases"),
# 5: T("Confiscated Goods from Bureau Of Customs")
# }
#settings.inv.send_types = {
# 21: T("Distribution")
# }
#settings.inv.send_type_default = 1
#settings.inv.recv_types = {
# 32: T("Donation"),
# 34: T("Purchase"),
# }
#settings.inv.item_status = {
# 0: current.messages["NONE"],
# 1: T("Dump"),
# 2: T("Sale"),
# 3: T("Reject"),
# 4: T("Surplus")
# }
# -------------------------------------------------------------------------
# Requests Management
# Uncomment to disable Inline Forms in Requests module
#settings.req.inline_forms = False
# Label for Inventory Requests
#settings.req.type_inv_label = "Donations"
# Label for People Requests
#settings.req.type_hrm_label = "Volunteers"
# Label for Requester
#settings.req.requester_label = "Site Contact"
# Uncomment to disable Recurring Request
#settings.req.recurring = False
#settings.req.requester_optional = True
# Uncomment if the User Account logging the Request is NOT normally the Requester
#settings.req.requester_is_author = False
# Filter Requester as being from the Site
#settings.req.requester_from_site = True
# Set the Requester as being an HR for the Site if no HR record yet & as Site contact if none yet exists
#settings.req.requester_to_site = True
#settings.req.date_writable = False
# Allow the status for requests to be set manually,
# rather than just automatically from commitments and shipments
#settings.req.status_writable = False
#settings.req.item_quantities_writable = True
#settings.req.skill_quantities_writable = True
#settings.req.show_quantity_transit = False
#settings.req.multiple_req_items = False
#settings.req.prompt_match = False
#settings.req.items_ask_purpose = False
# Uncomment to disable the Commit step in the workflow & simply move direct to Ship
#settings.req.use_commit = False
# Uncomment to have Donations include a 'Value' field
#settings.req.commit_value = True
# Uncomment to allow Donations to be made without a matching Request
#settings.req.commit_without_request = True
# Uncomment if the User Account logging the Commitment is NOT normally the Committer
    #settings.req.committer_is_author = False
# Should Requests ask whether Security is required?
#settings.req.ask_security = True
# Should Requests ask whether Transportation is required?
#settings.req.ask_transport = True
#settings.req.use_req_number = False
#settings.req.generate_req_number = False
#settings.req.req_form_name = "Request Issue Form"
#settings.req.req_shortname = "RIS"
# Restrict the type of requests that can be made, valid values in the
# list are ("Stock", "People", "Other"). If this is commented out then
# all types will be valid.
#settings.req.req_type = ("Stock",)
# Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities
#settings.req.summary = True
# Uncomment to restrict adding new commits to Completed commits
#settings.req.req_restrict_on_complete = True
# Custom Crud Strings for specific req_req types
#settings.req.req_crud_strings = dict()
#ADD_ITEM_REQUEST = T("Make a Request for Donations")
# req_req Crud Strings for Item Request (type=1)
#settings.req.req_crud_strings[1] = Storage(
# label_create = ADD_ITEM_REQUEST,
# title_display = T("Request for Donations Details"),
# title_list = T("Requests for Donations"),
# title_update = T("Edit Request for Donations"),
# label_list_button = T("List Requests for Donations"),
# label_delete_button = T("Delete Request for Donations"),
# msg_record_created = T("Request for Donations Added"),
# msg_record_modified = T("Request for Donations Updated"),
# msg_record_deleted = T("Request for Donations Canceled"),
# msg_list_empty = T("No Requests for Donations"))
#ADD_PEOPLE_REQUEST = T("Make a Request for Volunteers")
# req_req Crud Strings for People Request (type=3)
#settings.req.req_crud_strings[3] = Storage(
# label_create = ADD_PEOPLE_REQUEST,
# title_display = T("Request for Volunteers Details"),
# title_list = T("Requests for Volunteers"),
# title_update = T("Edit Request for Volunteers"),
# label_list_button = T("List Requests for Volunteers"),
# label_delete_button = T("Delete Request for Volunteers"),
# msg_record_created = T("Request for Volunteers Added"),
# msg_record_modified = T("Request for Volunteers Updated"),
# msg_record_deleted = T("Request for Volunteers Canceled"),
# msg_list_empty = T("No Requests for Volunteers"))
# -------------------------------------------------------------------------
# Supply
#settings.supply.use_alt_name = False
# Do not edit after deployment
#settings.supply.catalog_default = T("Default")
# -------------------------------------------------------------------------
# Projects
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
#settings.project.mode_3w = True
# Uncomment this to use DRR (Disaster Risk Reduction) extensions
#settings.project.mode_drr = True
# Uncomment this to use settings suitable for detailed Task management
#settings.project.mode_task = True
# Uncomment this to use Activities for Projects & Tasks
#settings.project.activities = True
# Uncomment this to use Activity Types for Activities & Projects
#settings.project.activity_types = True
# Uncomment this to filter dates in Activities
#settings.project.activity_filter_year = True
# Uncomment this to use Codes for projects
#settings.project.codes = True
# Uncomment this to call project locations 'Communities'
#settings.project.community = True
# Uncomment this to enable Hazards in 3W projects
#settings.project.hazards = True
# Uncomment this to enable Indicators in projects
#settings.project.indicators = True
# Uncomment this to enable Milestones in projects
#settings.project.milestones = True
# Uncomment this to use Projects for Activities & Tasks
#settings.project.projects = True
# Uncomment this to disable Sectors in projects
#settings.project.sectors = False
# Uncomment this to enable Programmes in projects
#settings.project.programmes = True
# Uncomment this to use Tags in Tasks
#settings.project.task_tag = True
# Uncomment this to enable Themes in 3W projects
#settings.project.themes = True
# Uncomment this to use Theme Percentages for projects
#settings.project.theme_percentages = True
# Uncomment this to use multiple Budgets per project
#settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
#settings.project.multiple_organisations = True
# Uncomment this to customise
# Links to Filtered Components for Donors & Partners
#settings.project.organisation_roles = {
# 1: T("Lead Implementer"), # T("Host National Society")
# 2: T("Partner"), # T("Partner National Society")
# 3: T("Donor"),
# 4: T("Customer"), # T("Beneficiary")?
# 5: T("Super"), # T("Beneficiary")?
#}
#settings.project.organisation_lead_role = 1
# Uncomment to customise the list of options for the Priority of a Task.
# NB Be very cautious about doing this (see docstring in modules/s3cfg.py)
#settings.project.task_priority_opts =
# Uncomment to customise the list of options for the Status of a Task.
# NB Be very cautious about doing this (see docstring in modules/s3cfg.py)
#settings.project.task_status_opts =
# -------------------------------------------------------------------------
# Incidents
# Uncomment this to use vehicles when responding to Incident Reports
#settings.irs.vehicle = True
# -------------------------------------------------------------------------
# Transport
# Uncomment to make Airport codes unique
#settings.transport.airport_code_unique = True
# Uncomment to make Seaport codes unique
#settings.transport.seaport_code_unique = True
# Uncomment to make Heliport codes unique
#settings.transport.heliport_code_unique = True
# -------------------------------------------------------------------------
# Filter Manager
#settings.search.filter_manager = False
# if you want to have videos appearing in /default/video
#settings.base.youtube_id = [dict(id = "introduction",
# title = T("Introduction"),
# video_id = "HR-FtR2XkBU"),]
# -----------------------------------------------------------------------------
# XForms
# Configure xform resources (example)
#settings.xforms.resources = [("Request", "req_req")]
# -------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# @ToDo: Have the system automatically enable migrate if a module is enabled
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("tour", Storage(
name_nice = T("Guided Tour Functionality"),
module_type = None,
)),
("translate", Storage(
name_nice = T("Translation Functionality"),
#description = "Selective translation of strings based on module.",
module_type = None,
)),
# Uncomment to enable internal support requests
#("support", Storage(
# name_nice = T("Support"),
# #description = "Support Requests",
# restricted = True,
# module_type = None # This item is handled separately for the menu
# )),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
            # The user-visible functionality of this module isn't normally required. Rather its main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 4
)),
#("proc", Storage(
# name_nice = T("Procurement"),
# #description = "Ordering & Purchasing of Goods & Services",
# restricted = True,
# module_type = 10
# )),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 5,
)),
# Vehicle depends on Assets
("vehicle", Storage(
name_nice = T("Vehicles"),
#description = "Manage Vehicles",
restricted = True,
module_type = 10,
)),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = 10,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
module_type = 2
)),
("survey", Storage(
name_nice = T("Surveys"),
#description = "Create, enter, and manage surveys.",
restricted = True,
module_type = 5,
)),
#("dc", Storage(
# name_nice = T("Data Collection"),
# #description = "Data collection tool",
# restricted = True,
# module_type = 10
#)),
("cr", Storage(
name_nice = T("Shelters"),
#description = "Tracks the location, capacity and breakdown of victims in Shelters",
restricted = True,
module_type = 10
)),
("hms", Storage(
name_nice = T("Hospitals"),
#description = "Helps to monitor status of hospitals",
restricted = True,
module_type = 10
)),
#("disease", Storage(
# name_nice = T("Disease Tracking"),
# #description = "Helps to track cases and trace contacts in disease outbreaks",
# restricted = True,
# module_type = 10
#)),
("dvr", Storage(
name_nice = T("Disaster Victim Registry"),
#description = "Allow affected individuals & households to register to receive compensation and distributions",
restricted = True,
module_type = 10,
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
("transport", Storage(
name_nice = T("Transport"),
restricted = True,
module_type = 10,
)),
("stats", Storage(
name_nice = T("Statistics"),
#description = "Manages statistics",
restricted = True,
module_type = None,
)),
("member", Storage(
name_nice = T("Members"),
#description = "Membership Management System",
restricted = True,
module_type = 10,
)),
("budget", Storage(
name_nice = T("Budgeting Module"),
#description = "Allows a Budget to be drawn up",
restricted = True,
module_type = 10
)),
#("deploy", Storage(
# name_nice = T("Deployments"),
# #description = "Manage Deployments",
# restricted = True,
# module_type = 10,
#)),
# Deprecated: Replaced by event
#("irs", Storage(
# name_nice = T("Incidents"),
# #description = "Incident Reporting System",
# restricted = True,
# module_type = 10
#)),
#("dvi", Storage(
# name_nice = T("Disaster Victim Identification"),
# #description = "Disaster Victim Identification",
# restricted = True,
# module_type = 10,
# #access = "|DVI|", # Only users with the DVI role can see this module in the default menu & access the controller
#)),
#("mpr", Storage(
# name_nice = T("Missing Person Registry"),
# #description = "Helps to report and search for missing persons",
# restricted = True,
# module_type = 10,
#)),
#("scenario", Storage(
# name_nice = T("Scenarios"),
# #description = "Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).",
# restricted = True,
# module_type = 10,
#)),
#("vulnerability", Storage(
# name_nice = T("Vulnerability"),
# #description = "Manages vulnerability indicators",
# restricted = True,
# module_type = 10,
# )),
#("fire", Storage(
# name_nice = T("Fire Stations"),
# #description = "Fire Station Management",
# restricted = True,
# module_type = 1,
#)),
#("water", Storage(
# name_nice = T("Water"),
# #description = "Flood Gauges show water levels in various parts of the country",
# restricted = True,
# module_type = 10
#)),
#("patient", Storage(
# name_nice = T("Patient Tracking"),
# #description = "Tracking of Patients",
# restricted = True,
# module_type = 10
#)),
#("po", Storage(
# name_nice = T("Population Outreach"),
# #description = "Population Outreach",
# restricted = True,
# module_type = 10
#)),
#("security", Storage(
# name_nice = T("Security"),
# #description = "Security Management System",
# restricted = True,
# module_type = 10,
#)),
# These are specialist modules
#("cap", Storage(
# name_nice = T("CAP"),
# #description = "Create & broadcast CAP alerts",
# restricted = True,
# module_type = 10,
#)),
# Requires RPy2 & PostgreSQL
#("climate", Storage(
# name_nice = T("Climate"),
# #description = "Climate data portal",
# restricted = True,
# module_type = 10,
#)),
#("delphi", Storage(
# name_nice = T("Delphi Decision Maker"),
# #description = "Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.",
# restricted = False,
# module_type = 10,
#)),
# @ToDo: Port these Assessments to the Survey module
#("building", Storage(
# name_nice = T("Building Assessments"),
# #description = "Building Safety Assessments",
# restricted = True,
# module_type = 10,
#)),
# Deprecated by Surveys module
# - depends on CR, IRS & Impact
#("assess", Storage(
# name_nice = T("Assessments"),
# #description = "Rapid Assessments & Flexible Impact Assessments",
# restricted = True,
# module_type = 10,
#)),
#("impact", Storage(
# name_nice = T("Impacts"),
# #description = "Used by Assess",
# restricted = True,
# module_type = None,
#)),
#("ocr", Storage(
# name_nice = T("Optical Character Recognition"),
# #description = "Optical Character Recognition for reading the scanned handwritten paper forms.",
# restricted = False,
# module_type = None,
#)),
])
    # END =========================================================================
| {
"content_hash": "e19a77f0d437c66ae12f2a4c5bddab07",
"timestamp": "",
"source": "github",
"line_count": 1202,
"max_line_length": 219,
"avg_line_length": 46.831114808652245,
"alnum_prop": 0.6318594446714395,
"repo_name": "michaelhowden/eden",
"id": "1247d2bd46aaab56a5a0ff3e2d08a930f55a3894",
"size": "56442",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/templates/default/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "2357611"
},
{
"name": "HTML",
"bytes": "1320631"
},
{
"name": "JavaScript",
"bytes": "20040869"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "29520986"
},
{
"name": "Ruby",
"bytes": "3611"
},
{
"name": "Shell",
"bytes": "5022"
},
{
"name": "XSLT",
"bytes": "2818129"
}
],
"symlink_target": ""
} |
"""
Geo Location component.
This component covers platforms that deal with external events that contain
a geo location related to the installed HA instance.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/geo_location/
"""
import logging
from datetime import timedelta
from typing import Optional
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
_LOGGER = logging.getLogger(__name__)
ATTR_DISTANCE = 'distance'
DOMAIN = 'geo_location'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_EVENTS = 'All Geo Location Events'
SCAN_INTERVAL = timedelta(seconds=60)
async def async_setup(hass, config):
"""Set up this component."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_EVENTS)
await component.async_setup(config)
return True
class GeoLocationEvent(Entity):
"""This represents an external event with an associated geo location."""
@property
def state(self):
"""Return the state of the sensor."""
if self.distance is not None:
return round(self.distance, 1)
return None
@property
def distance(self) -> Optional[float]:
"""Return distance value of this external event."""
return None
@property
def latitude(self) -> Optional[float]:
"""Return latitude value of this external event."""
return None
@property
def longitude(self) -> Optional[float]:
"""Return longitude value of this external event."""
return None
@property
def state_attributes(self):
"""Return the state attributes of this external event."""
data = {}
if self.latitude is not None:
data[ATTR_LATITUDE] = round(self.latitude, 5)
if self.longitude is not None:
data[ATTR_LONGITUDE] = round(self.longitude, 5)
return data
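# Illustrative sketch of a platform entity (not part of Home Assistant itself):
# a platform subclasses GeoLocationEvent and overrides the three properties;
# the values below are made-up placeholders and the distance unit (km) is an
# assumption.
#
#   class DemoEvent(GeoLocationEvent):
#       @property
#       def distance(self):
#           return 12.34
#
#       @property
#       def latitude(self):
#           return -33.86515
#
#       @property
#       def longitude(self):
#           return 151.20930
#
# With these overrides, `state` reports 12.3 and `state_attributes` contains
# the rounded latitude/longitude values.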
| {
"content_hash": "e26b4acee487eee6f82a9e42686a9056",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 30.681159420289855,
"alnum_prop": 0.6920170051960322,
"repo_name": "persandstrom/home-assistant",
"id": "66753aad22117cc468316f03aeb350c254153fba",
"size": "2117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/geo_location/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
import os
print("Generate digits test dataset...")
test_digits_folder = os.path.normpath(os.path.join(os.path.join(os.getcwd(), os.path.dirname(__file__)), 'digits/test_digits'))
test_digits_file_path = os.path.join(os.path.join(os.getcwd(), os.path.dirname(__file__)), 'digits_multiclass_test.dat')
for (dirpath, dirnames, filenames) in os.walk(test_digits_folder):
test_digit_content = ''
for file_name in filenames:
test_file_path = os.path.normpath(os.path.join(dirpath, file_name))
digit_class = file_name[0:1]
one_line_string = ''
with open(test_file_path) as f:
for line in f:
space_line = " ".join(line).rstrip() + " "
one_line_string = one_line_string + space_line
one_line_string = one_line_string + digit_class + "\n"
test_digit_content = test_digit_content + one_line_string
target_file = open(test_digits_file_path, 'w')
target_file.write(test_digit_content)
target_file.close()
print("Generate digits test complete.")
print("Generate digits training dataset...")
train_digits_folder = os.path.normpath(os.path.join(os.path.join(os.getcwd(), os.path.dirname(__file__)), 'digits/training_digits'))
train_digits_file_path = os.path.join(os.path.join(os.getcwd(), os.path.dirname(__file__)), 'digits_multiclass_train.dat')
for (dirpath, dirnames, filenames) in os.walk(train_digits_folder):
train_digit_content = ''
for file_name in filenames:
train_file_path = os.path.normpath(os.path.join(dirpath, file_name))
digit_class = file_name[0:1]
one_line_string = ''
with open(train_file_path) as f:
for line in f:
space_line = " ".join(line).rstrip() + " "
one_line_string = one_line_string + space_line
one_line_string = one_line_string + digit_class + "\n"
train_digit_content = train_digit_content + one_line_string
target_file = open(train_digits_file_path, 'w')
target_file.write(train_digit_content)
target_file.close()
print("Generate digits training complete.")
| {
"content_hash": "9a30ea394e0d6c5051ecbfaf9fa50da6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 132,
"avg_line_length": 43.08163265306123,
"alnum_prop": 0.6428233064898152,
"repo_name": "fukuball/fuku-ml",
"id": "91993de978ff1003563f1445302d27774afff315",
"size": "2127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FukuML/dataset/digits_dataset_generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "341784"
},
{
"name": "Shell",
"bytes": "271"
}
],
"symlink_target": ""
} |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import re
class HdfsParser():
def __init__(self):
self.initialLine = None
self.state = None
def parse_line(self, line):
hdfsLine = HdfsLine()
type, matcher = hdfsLine.recognize_type(line)
if type == HdfsLine.LineType.HeaderStart:
self.state = 'PROCESS_STARTED'
elif type == HdfsLine.LineType.Progress:
self.state = 'PROGRESS'
hdfsLine.parse_progress_log(line, matcher)
      if self.initialLine is None: self.initialLine = hdfsLine
return hdfsLine
elif (type == HdfsLine.LineType.ProgressEnd):
self.state = 'PROCESS_FINISED'
return None
class HdfsLine():
class LineType:
HeaderStart, Progress, ProgressEnd, Unknown = range(4)
MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
HEADER_BEGIN_PATTERN = re.compile('Time Stamp\w+Iteration#\w+Bytes Already Moved\w+Bytes Left To Move\w+Bytes Being Moved')
PROGRESS_PATTERN = re.compile(
"(?P<date>.*?)\s+" +
"(?P<iteration>\d+)\s+" +
MEMORY_PATTERN % (1,1,1) + "\s+" +
MEMORY_PATTERN % (2,2,2) + "\s+" +
MEMORY_PATTERN % (3,3,3)
)
PROGRESS_END_PATTERN = re.compile('(The cluster is balanced. Exiting...|The cluster is balanced. Exiting...)')
def __init__(self):
self.date = None
self.iteration = None
self.bytesAlreadyMoved = None
self.bytesLeftToMove = None
self.bytesBeingMoved = None
self.bytesAlreadyMovedStr = None
self.bytesLeftToMoveStr = None
self.bytesBeingMovedStr = None
def recognize_type(self, line):
for type, pattern in (
(HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
(HdfsLine.LineType.Progress, self.PROGRESS_PATTERN),
(HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
):
m = re.match(pattern, line)
if m:
return type, m
return HdfsLine.LineType.Unknown, None
def parse_progress_log(self, line, m):
'''
Parse the line of 'hdfs rebalancer' output. The example output being parsed:
Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved
Jul 28, 2014 5:01:49 PM 0 0 B 5.74 GB 9.79 GB
Jul 28, 2014 5:03:00 PM 1 0 B 5.58 GB 9.79 GB
Throws AmbariException in case of parsing errors
'''
m = re.match(self.PROGRESS_PATTERN, line)
if m:
self.date = m.group('date')
self.iteration = int(m.group('iteration'))
self.bytesAlreadyMoved = self.parse_memory(m.group('memory_1'), m.group('mult_1'))
self.bytesLeftToMove = self.parse_memory(m.group('memory_2'), m.group('mult_2'))
self.bytesBeingMoved = self.parse_memory(m.group('memory_3'), m.group('mult_3'))
self.bytesAlreadyMovedStr = m.group('memmult_1')
self.bytesLeftToMoveStr = m.group('memmult_2')
self.bytesBeingMovedStr = m.group('memmult_3')
else:
raise AmbariException("Failed to parse line [%s]")
def parse_memory(self, memorySize, multiplier_type):
try:
factor = self.MEMORY_SUFFIX.index(multiplier_type)
except ValueError:
raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
return float(memorySize) * (1024 ** factor)
def to_json(self):
return {
'timeStamp' : self.date,
'iteration' : self.iteration,
'dataMoved': self.bytesAlreadyMovedStr,
'dataLeft' : self.bytesLeftToMoveStr,
'dataBeingMoved': self.bytesBeingMovedStr,
'bytesMoved': self.bytesAlreadyMoved,
'bytesLeft' : self.bytesLeftToMove,
'bytesBeingMoved': self.bytesBeingMoved,
}
def __str__(self):
return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
| {
"content_hash": "4de26647af3537e18b1f18f51483e6ba",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 194,
"avg_line_length": 39.674418604651166,
"alnum_prop": 0.6135209066041423,
"repo_name": "keedio/keedio-stacks",
"id": "aca1b05294034172f030865de9927f4617f2c6e2",
"size": "5141",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "KEEDIO/1.1/services/HDFS/package/scripts/hdfs_rebalance.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "386"
},
{
"name": "Python",
"bytes": "1080418"
},
{
"name": "Shell",
"bytes": "50473"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
from spider import VERSION
f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
readme = f.read()
f.close()
setup(
name='django-spider',
version=".".join(map(str, VERSION)),
description='a multi-threaded spider with a web interface',
long_description=readme,
author='Charles Leifer',
author_email='[email protected]',
url='https://github.com/coleifer/django-spider',
packages=find_packages(),
package_data = {
'spider': [
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
test_suite='runtests.runtests',
)
| {
"content_hash": "e0890d640d712686acde172b8fd32767",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 63,
"avg_line_length": 27.636363636363637,
"alnum_prop": 0.6195175438596491,
"repo_name": "georgedorn/django-spider",
"id": "e5b96533d9a1191ebea8cc80a7cae326b613f4d4",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import sys
import re
import gevent
import urlparse
from gevent.pywsgi import WSGIHandler
from socketio import transports
class SocketIOHandler(WSGIHandler):
RE_REQUEST_URL = re.compile(r"""
^/(?P<resource>.+?)
/1
/(?P<transport_id>[^/]+)
/(?P<sessid>[^/]+)/?$
""", re.X)
RE_HANDSHAKE_URL = re.compile(r"^/(?P<resource>.+?)/1/$", re.X)
# new socket.io versions (> 0.9.8) call an obscure url with two slashes
# instead of a transport when disconnecting
# https://github.com/LearnBoost/socket.io-client/blob/0.9.16/lib/socket.js#L361
RE_DISCONNECT_URL = re.compile(r"""
^/(?P<resource>.+?)
/(?P<protocol_version>[^/]+)
//(?P<sessid>[^/]+)/?$
""", re.X)
handler_types = {
'websocket': transports.WebsocketTransport,
'flashsocket': transports.FlashSocketTransport,
'htmlfile': transports.HTMLFileTransport,
'xhr-multipart': transports.XHRMultipartTransport,
'xhr-polling': transports.XHRPollingTransport,
'jsonp-polling': transports.JSONPolling,
}
def __init__(self, config, *args, **kwargs):
"""Create a new SocketIOHandler.
:param config: dict Configuration for timeouts and intervals
that will go down to the other components, transports, etc..
"""
self.socketio_connection = False
self.allowed_paths = None
self.config = config
super(SocketIOHandler, self).__init__(*args, **kwargs)
self.transports = self.handler_types.keys()
if self.server.transports:
self.transports = self.server.transports
if not set(self.transports).issubset(set(self.handler_types)):
raise ValueError("transports should be elements of: %s" %
(self.handler_types.keys()))
def _do_handshake(self, tokens):
if tokens["resource"] != self.server.resource:
self.log_error("socket.io URL mismatch")
else:
socket = self.server.get_socket()
data = "%s:%s:%s:%s" % (socket.sessid,
self.config['heartbeat_timeout'] or '',
self.config['close_timeout'] or '',
",".join(self.transports))
self.write_smart(data)
def write_jsonp_result(self, data, wrapper="0"):
self.start_response("200 OK", [
("Content-Type", "application/javascript"),
])
self.result = ['io.j[%s]("%s");' % (wrapper, data)]
def write_plain_result(self, data):
self.start_response("200 OK", [
("Access-Control-Allow-Origin", self.environ.get('HTTP_ORIGIN', '*')),
("Access-Control-Allow-Credentials", "true"),
("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
("Access-Control-Max-Age", "3600"),
("Content-Type", "text/plain"),
])
self.result = [data]
def write_smart(self, data):
args = urlparse.parse_qs(self.environ.get("QUERY_STRING"))
if "jsonp" in args:
self.write_jsonp_result(data, args["jsonp"][0])
else:
self.write_plain_result(data)
self.process_result()
def handle_one_response(self):
"""This function deals with *ONE INCOMING REQUEST* from the web.
        It will wire and exchange messages with the queues for long-polling
methods, otherwise, will stay alive for websockets.
"""
path = self.environ.get('PATH_INFO')
# Kick non-socket.io requests to our superclass
if not path.lstrip('/').startswith(self.server.resource + '/'):
return super(SocketIOHandler, self).handle_one_response()
self.status = None
self.headers_sent = False
self.result = None
self.response_length = 0
self.response_use_chunked = False
# This is analyzed for each and every HTTP requests involved
# in the Socket.IO protocol, whether long-running or long-polling
# (read: websocket or xhr-polling methods)
request_method = self.environ.get("REQUEST_METHOD")
request_tokens = self.RE_REQUEST_URL.match(path)
handshake_tokens = self.RE_HANDSHAKE_URL.match(path)
disconnect_tokens = self.RE_DISCONNECT_URL.match(path)
if handshake_tokens:
# Deal with first handshake here, create the Socket and push
# the config up.
return self._do_handshake(handshake_tokens.groupdict())
elif disconnect_tokens:
# it's a disconnect request via XHR
tokens = disconnect_tokens.groupdict()
elif request_tokens:
tokens = request_tokens.groupdict()
# and continue...
else:
# This is no socket.io request. Let the WSGI app handle it.
return super(SocketIOHandler, self).handle_one_response()
# Setup socket
sessid = tokens["sessid"]
socket = self.server.get_socket(sessid)
if not socket:
self.handle_bad_request()
return [] # Do not say the session is not found, just bad request
# so they don't start brute forcing to find open sessions
if self.environ['QUERY_STRING'].startswith('disconnect'):
# according to socket.io specs disconnect requests
# have a `disconnect` query string
# https://github.com/LearnBoost/socket.io-spec#forced-socket-disconnection
socket.disconnect()
self.handle_disconnect_request()
return []
# Setup transport
transport = self.handler_types.get(tokens["transport_id"])
# In case this is WebSocket request, switch to the WebSocketHandler
# FIXME: fix this ugly class change
old_class = None
if issubclass(transport, (transports.WebsocketTransport,
transports.FlashSocketTransport)):
old_class = self.__class__
self.__class__ = self.server.ws_handler_class
self.prevent_wsgi_call = True # thank you
# TODO: any errors, treat them ??
self.handle_one_response() # does the Websocket dance before we continue
# Make the socket object available for WSGI apps
self.environ['socketio'] = socket
# Create a transport and handle the request likewise
self.transport = transport(self, self.config)
# transports register their own spawn'd jobs now
self.transport.do_exchange(socket, request_method)
if not socket.connection_established:
# This is executed only on the *first* packet of the establishment
# of the virtual Socket connection.
socket.connection_established = True
socket.state = socket.STATE_CONNECTED
socket._spawn_heartbeat()
socket._spawn_watcher()
try:
# We'll run the WSGI app if it wasn't already done.
if socket.wsgi_app_greenlet is None:
# TODO: why don't we spawn a call to handle_one_response here ?
# why call directly the WSGI machinery ?
start_response = lambda status, headers, exc=None: None
socket.wsgi_app_greenlet = gevent.spawn(self.application,
self.environ,
start_response)
except:
self.handle_error(*sys.exc_info())
# we need to keep the connection open if we are an open socket
if tokens['transport_id'] in ['flashsocket', 'websocket']:
            # wait here for all jobs to finish before moving on
gevent.joinall(socket.jobs)
# Switch back to the old class so references to this don't use the
# incorrect class. Useful for debugging.
if old_class:
self.__class__ = old_class
# Clean up circular references so they can be garbage collected.
if hasattr(self, 'websocket') and self.websocket:
if hasattr(self.websocket, 'environ'):
del self.websocket.environ
del self.websocket
if self.environ:
del self.environ
def handle_bad_request(self):
self.close_connection = True
self.start_response("400 Bad Request", [
('Content-Type', 'text/plain'),
('Connection', 'close'),
('Content-Length', 0)
])
def handle_disconnect_request(self):
self.close_connection = True
self.start_response("200 OK", [
('Content-Type', 'text/plain'),
('Connection', 'close'),
('Content-Length', 0)
])
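# Illustrative wiring only (not part of this module): a minimal server that
# routes every /socket.io request through socketio_manage with no namespaces.
# Assumes gevent and gevent-websocket are installed; host and port are made up.
if __name__ == '__main__':
    from socketio import socketio_manage
    from socketio.server import SocketIOServer
    def _demo_app(environ, start_response):
        if environ['PATH_INFO'].startswith('/socket.io'):
            socketio_manage(environ, {}, request=None)
        else:
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
        return []
    SocketIOServer(('0.0.0.0', 8080), _demo_app,
                   resource='socket.io', policy_server=False).serve_forever()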
| {
"content_hash": "da9c4df1a6833ab58cc419140cfda04e",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 86,
"avg_line_length": 39.577777777777776,
"alnum_prop": 0.577316114542392,
"repo_name": "arnuschky/gevent-socketio",
"id": "74681b8118709f0f06332593e76664204d8b24dd",
"size": "8905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "socketio/handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4515"
},
{
"name": "JavaScript",
"bytes": "153092"
},
{
"name": "Python",
"bytes": "126175"
}
],
"symlink_target": ""
} |
from flask import Flask, flash
from flask import request
from flask import render_template
from flask import jsonify
from flask.ext.bootstrap import Bootstrap
from wtforms import Form, StringField, validators, FieldList, FormField, SubmitField, HiddenField
from dataModel import *
import hashlib
import unicodedata
import wtforms.validators
MAX_LEN = 2048
app = Flask(__name__)
Bootstrap(app)
shell_ids = {"Bash" : 1, "PowerShell" : 2, "cmd" : 3, "tcsh" : 4, "zsh" : 5, "ksh" : 6 }
def hash_text(text):
return hashlib.md5(text).hexdigest()
class ExampleForm(Form):
shell_type = HiddenField()
command = HiddenField()
error = HiddenField()
submit_fix = HiddenField()
fixForm = HiddenField()
class ExampleBatch(Form):
examples = FieldList(FormField(ExampleForm), min_entries = 1)
add_one = SubmitField('Add Another Command')
submit_all = SubmitField('Submit All')
@app.route('/licenses.html', methods=['GET','POST'])
def present_licenses():
return render_template('/licenses.html')
@app.route('/disclaimer.html', methods=['GET','POST'])
def present_disclaimer():
return render_template('/disclaimer.html')
@app.route('/submit.html', methods=['GET','POST'])
def present_page():
form = ExampleForm(request.form)
if request.method == 'POST' and form.validate():
resp_dict = {'new':True, 'exampleID':42}
#resp_dict = handle_example(form, True)
return jsonify(**resp_dict)
return render_template('/submit.html', form=form)
def handle_form_entries(a_formlist, insert):
for form in a_formlist:
handle_example(form, insert)
def unicode_to_str(unicode_txt):
return unicodedata.normalize('NFKD', unicode_txt).encode('ascii', 'ignore')
def should_submit_fix(ex_form):
return unicode_to_str(ex_form.submit_fix.data) == "yes"
def get_shell_id(ex_form):
shell_txt = unicode_to_str(ex_form.shell_type.data)
#return shell_ids[shell_txt]
return 1
def handle_example(ex_form, insert):
#import pdb; pdb.set_trace()
shell_type = get_shell_id(ex_form) #TO DO: Bash is one. This is hard-coded for now.
cmd_txt_unicode = ex_form.command.data
cmd_txt = unicode_to_str(cmd_txt_unicode)
cmd_txt = canonical_str(cmd_txt)
cmd_len, cmd_fst = count_and_first_word(cmd_txt)
err_txt_unicode = ex_form.error.data
err_txt = unicode_to_str(err_txt_unicode)
err_txt = canonical_str(err_txt)
err_len, err_fst = count_and_first_word(err_txt)
fix_txt_unicode = ex_form.fixForm.data
fix_txt = unicode_to_str(fix_txt_unicode)
fix_txt = canonical_str(fix_txt)
fix_len, fix_fst = count_and_first_word(fix_txt)
cmd_hash = hash_text(cmd_txt)
err_hash = hash_text(err_txt)
fix_hash = hash_text(fix_txt)
if insert:
cmd, cmd_created = Command.get_or_create(txt_hash = cmd_hash, first_word = cmd_fst, word_count = cmd_len, text = cmd_txt)
err, err_created = Output.get_or_create(txt_hash = err_hash, first_word = err_fst, word_count = err_len, text = err_txt)
inv, inv_created = Invocation.get_or_create(shell_id = shell_type, cmd_id=cmd.id, out_id=err.id)
if should_submit_fix(ex_form):
fix, fix_created = Fix.get_or_create(txt_hash = fix_hash, first_word = fix_fst, word_count = fix_len, text = fix_txt)
rEx, rex_created = RepairExample.get_or_create(invocation_id=inv.id, fix_id=fix.id)
return {'new':rex_created, 'exampleID':rEx.id}
def count_and_first_word(text):
toks = text.split()
return (len(toks), toks[0])
#split string into tokens then rejoin with one space
def canonical_str(text):
toks = text.split()
return " ".join(toks)
import os
if __name__=="__main__":
app.secret_key = 'placeholder'
port = 5000
app.run(host="0.0.0.0", port=port, debug=True)
| {
"content_hash": "6b5ed96c535af1f8bb2658969b396f3a",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 129,
"avg_line_length": 35.027027027027025,
"alnum_prop": 0.6568930041152263,
"repo_name": "MichaelBVaughn/NoFAQ",
"id": "44640cdd53e1d2606c28d6cf2370ded0a5f5020b",
"size": "3888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebInterface/webApp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "F#",
"bytes": "209591"
},
{
"name": "Forth",
"bytes": "3"
},
{
"name": "HTML",
"bytes": "30991"
},
{
"name": "JavaScript",
"bytes": "87784"
},
{
"name": "Python",
"bytes": "5860"
},
{
"name": "SQLPL",
"bytes": "280"
}
],
"symlink_target": ""
} |
import FWCore.ParameterSet.Config as cms
#---------------------------------------------------------------------------------------------------
# M A I N
#---------------------------------------------------------------------------------------------------
# create the process
process = cms.Process('FILEFI')
# say how many events to process (Don't change!!)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
#>> input source
process.source = cms.Source(
#"PoolSource", fileNames = cms.untracked.vstring('file:XX-GPACK-XX.root')
"PoolSource", fileNames = cms.untracked.vstring('XX-LFN-XX')
)
process.source.inputCommands = cms.untracked.vstring(
"keep *",
"drop *_MEtoEDMConverter_*_*",
"drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT"
)
## lazy download
#process.SiteLocalConfigService = cms.Service(
# "SiteLocalConfigService",
# overrideSourceCacheHintDir = cms.untracked.string("lazy-download")
#)
#>> configurations
# determine the global tag to use
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.GlobalTag.globaltag = '80X_mcRun2_asymptotic_2016_v3'
# define meta data for this production
process.configurationMetadata = cms.untracked.PSet(
name = cms.untracked.string('BambuProd'),
version = cms.untracked.string('Mit_045'),
annotation = cms.untracked.string('AODSIM')
)
#>> standard sequences
# load some standard sequences we will need
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi')
process.load('TrackingTools.TransientTrack.TransientTrackBuilder_cfi')
# define sequence for ProductNotFound
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
wantSummary = cms.untracked.bool(False)
)
# Import/Load the filler so all is already available for config changes
from MitProd.TreeFiller.MitTreeFiller_cfi import MitTreeFiller
process.load('MitProd.TreeFiller.MitTreeFiller_cfi')
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# R E C O S E Q U E N C E
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
## Load stablePart producers
from MitEdm.Producers.conversionElectronsStable_cfi import electronsStable
process.load('MitEdm.Producers.conversionElectronsStable_cfi')
# Load Mit Mvf Conversion producer
# MultiVertexFitter is currently broken
#from MitProd.TreeFiller.conversionProducer_cff import conversionProducer, addConversionFiller
#process.load('MitProd.TreeFiller.conversionProducer_cff')
#addConversionFiller(MitTreeFiller)
# Load basic particle flow collections
# Used for rho calculation
from CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi import goodOfflinePrimaryVertices
from CommonTools.ParticleFlow.pfParticleSelection_cff import pfParticleSelectionSequence, pfPileUp, pfNoPileUp, pfPileUpIso, pfNoPileUpIso
from CommonTools.ParticleFlow.pfPhotons_cff import pfPhotonSequence
from CommonTools.ParticleFlow.pfElectrons_cff import pfElectronSequence
from CommonTools.ParticleFlow.pfMuons_cff import pfMuonSequence
from CommonTools.ParticleFlow.TopProjectors.pfNoMuon_cfi import pfNoMuon
from CommonTools.ParticleFlow.TopProjectors.pfNoElectron_cfi import pfNoElectron
process.load('CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi')
process.load('CommonTools.ParticleFlow.pfParticleSelection_cff')
process.load('CommonTools.ParticleFlow.pfPhotons_cff')
process.load('CommonTools.ParticleFlow.pfElectrons_cff')
process.load('CommonTools.ParticleFlow.pfMuons_cff')
process.load('CommonTools.ParticleFlow.TopProjectors.pfNoMuon_cfi')
process.load('CommonTools.ParticleFlow.TopProjectors.pfNoElectron_cfi')
pfPileUp.PFCandidates = 'particleFlowPtrs'
pfNoPileUp.bottomCollection = 'particleFlowPtrs'
pfPileUpIso.PFCandidates = 'particleFlowPtrs'
pfNoPileUpIso.bottomCollection = 'particleFlowPtrs'
pfPileUp.Enable = True
pfPileUp.Vertices = 'goodOfflinePrimaryVertices'
pfPileUp.checkClosestZVertex = cms.bool(False)
# Loading PFProducer to get the ptrs
from RecoParticleFlow.PFProducer.pfLinker_cff import particleFlowPtrs
process.load('RecoParticleFlow.PFProducer.pfLinker_cff')
# Load PUPPI
from MitProd.TreeFiller.PuppiSetup_cff import puppiSequence, photonIdForPuppi
process.load('MitProd.TreeFiller.PuppiSetup_cff')
egmPhotonIDSequence = photonIdForPuppi(process)
# PUPPI jets
from RecoJets.JetProducers.ak4PFJetsPuppi_cfi import ak4PFJetsPuppi
process.load('RecoJets.JetProducers.ak4PFJetsPuppi_cfi')
ak4PFJetsPuppi.src = 'puppi'
ak4PFJetsPuppi.doAreaFastjet = True
# PUPPI MET
from RecoMET.METProducers.PFMET_cfi import pfMet
process.pfMETPuppi = pfMet.clone(
src = cms.InputTag('puppiForMET'),
calculateSignificance = cms.bool(False)
)
pfMETPuppi = process.pfMETPuppi
# Load HPS tau reconstruction (tau in AOD is older than the latest reco in release)
from RecoTauTag.Configuration.RecoPFTauTag_cff import PFTau
process.load('RecoTauTag.Configuration.RecoPFTauTag_cff')
# Load btagging
from MitProd.TreeFiller.utils.setupBTag import initBTag, setupBTag
vertexingPFPV = initBTag(process, 'PFPV', candidates = 'particleFlow', primaryVertex = 'offlinePrimaryVertices')
ak4PFBTagSequence = setupBTag(process, 'ak4PFJets', 'AKt4PF', 'PFPV')
ak4PFCHSBTagSequence = setupBTag(process, 'ak4PFJetsCHS', 'AKt4PFCHS', 'PFPV')
ak4PFPuppiBTagSequence = setupBTag(process, 'ak4PFJetsPuppi', 'AKt4PFPuppi', 'PFPV')
# recluster fat jets, btag subjets
from MitProd.TreeFiller.utils.makeFatJets import makeFatJets
ak8chsSequence = makeFatJets(process, src = 'pfNoPileUp', algoLabel = 'AK', jetRadius = 0.8, colLabel = 'PFJetsCHS', btagLabel = 'PFPV')
ak8puppiSequence = makeFatJets(process, src = 'puppi', algoLabel = 'AK', jetRadius = 0.8, colLabel = 'PuppiJets', btagLabel = 'PFPV')
ca15chsSequence = makeFatJets(process, src = 'pfNoPileUp', algoLabel = 'CA', jetRadius = 1.5, colLabel = 'PFJetsCHS', btagLabel = 'PFPV')
ca15puppiSequence = makeFatJets(process, src = 'puppi', algoLabel = 'CA', jetRadius = 1.5, colLabel = 'PuppiJets', btagLabel = 'PFPV')
#> Setup the met filters
from MitProd.TreeFiller.metFilters_cff import metFilters
process.load('MitProd.TreeFiller.metFilters_cff')
#> The bambu reco sequence
recoSequence = cms.Sequence(
electronsStable *
# conversionProducer *
goodOfflinePrimaryVertices *
particleFlowPtrs *
pfParticleSelectionSequence *
pfPhotonSequence *
pfMuonSequence *
pfNoMuon *
pfElectronSequence *
pfNoElectron *
PFTau *
egmPhotonIDSequence *
puppiSequence *
ak4PFJetsPuppi *
vertexingPFPV *
ak4PFBTagSequence *
ak4PFCHSBTagSequence *
ak4PFPuppiBTagSequence *
ak8chsSequence *
ak8puppiSequence *
ca15chsSequence *
ca15puppiSequence *
pfMETPuppi *
metFilters
)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# G E N S E Q U E N C E
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Import/Load genjets
from RecoJets.Configuration.GenJetParticles_cff import genJetParticles
process.load('RecoJets.Configuration.GenJetParticles_cff')
from RecoJets.Configuration.RecoGenJets_cff import ak4GenJets, ak8GenJets
process.load('RecoJets.Configuration.RecoGenJets_cff')
genSequence = cms.Sequence(
genJetParticles *
ak4GenJets *
ak8GenJets
)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# B A M B U S E Q U E N C E
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# remember the bambu sequence has been imported and loaded already in the beginning
# configure the filler
MitTreeFiller.TreeWriter.fileName = 'bambu-output-file-tmp'
MitTreeFiller.PileupInfo.active = True
MitTreeFiller.MCParticles.active = True
MitTreeFiller.MCEventInfo.active = True
MitTreeFiller.MCAllVertexes.active = True
MitTreeFiller.Trigger.active = False
MitTreeFiller.MetaInfos.l1GtReadRecEdmName = ''
# define fill bambu filler sequence
bambuFillerSequence = cms.Sequence(
MitTreeFiller
)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# C M S S W P A T H
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
process.path = cms.Path(
recoSequence *
genSequence *
bambuFillerSequence
)
| {
"content_hash": "f5dedeab63e172652a6b7e3212fc8154",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 138,
"avg_line_length": 38.37931034482759,
"alnum_prop": 0.6919362084456424,
"repo_name": "cpausmit/Kraken",
"id": "bccac98f9956d090e4f2317e65c64c7abc91aff4",
"size": "8904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filefi/045/mc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3913"
},
{
"name": "M4",
"bytes": "49993"
},
{
"name": "Monkey C",
"bytes": "246623"
},
{
"name": "PHP",
"bytes": "4827"
},
{
"name": "Python",
"bytes": "1402251"
},
{
"name": "Shell",
"bytes": "119172"
}
],
"symlink_target": ""
} |
import re
import collections
from time import time
###################################################################
# Define Functions
def words(text): return re.findall('[a-z]+', text.lower())
def train(features):
model = collections.defaultdict(lambda: 1)
for f in features:
model[f] += 1
return model
def edits1(word):
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]
inserts = [a + c + b for a, b in splits for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(word):
return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)
def known(words): return set(w for w in words if w in NWORDS)
def correct(word):
candidates = known(
[word]) or known(
edits1(word)) or known_edits2(word) or [word]
return max(candidates, key=NWORDS.get).capitalize()
def spelltest(tests, bias=None, verbose=False):
    n, bad, unknown, start = 0, 0, 0, time()
if bias:
for target in tests:
NWORDS[target] += bias
for target, wrongs in tests.items():
for wrong in wrongs.split():
n += 1
w = correct(wrong)
if w != target:
bad += 1
unknown += (target not in NWORDS)
if verbose:
print '%r => %r (%d); expected %r (%d)' % (
wrong, w, NWORDS[w], target, NWORDS[target])
return dict(bad=bad, n=n, bias=bias, pct=float(100. - 100. * bad / n),
unknown=unknown, secs=float(time() - start))
###################################################################
# Create Vector of Words
NWORDS = train(words(file('/home/dan/Spark_Files/Web/big.txt').read()))
# Define the Alphabet
alphabet = 'abcdefghijklmnopqrstuvwxyz'
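# Illustrative spot-check (hypothetical misspelling): the suggestion depends
# entirely on the word frequencies learned from big.txt above.
print 'speling ->', correct('speling')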
###################################################################
tests1 = {'access': 'acess', 'accessing': 'accesing', 'accommodation':
'accomodation acommodation acomodation', 'account': 'acount'}
tests2 = {'forbidden': 'forbiden', 'decisions': 'deciscions descisions',
'supposedly': 'supposidly', 'embellishing': 'embelishing'}
print spelltest(tests1)
print spelltest(tests2) # only do this after everything is debugged
###################################################################
# Facebook
test_text = 'For all my friends, whether close or casual, just because. This is \
one of the longest posts I will ever make, and one of the most real \
too. Everyone will go through some hard times at some point. Life \
isn\'t easy. Just something to think about. Did you know the people \
that are the strongest are usually the most sensitive? Did you know \
the people who exhibit the most kindness are the first to get \
mistreated? Did you know the ones who take care of others all the time \
are usually the ones who need it the most? Did you know the three \
hardest things to say are I love you, I\'m sorry, and help me? \
Sometimes just because a person looks happy, you have to look past \
their smile to see how much pain they may be in. To all my friends \
who are going through some issues right now--let\'s start an intentional\
avalanche. We all need positive intentions right now. If I don\'t see \
your name, I\'ll understand. May I ask my friends wherever you might \
be, to kindly copy and paste this status for one hour to give a moment \
of support to all of those who have family problems, health struggles, \
job issues, worries of any kind and just needs to know that someone cares. \
Do it for all of us, for nobody is immune. I hope to see this on the walls \
of all my friends just for moral support. I know some will!!! I did \
it for a friend and you can too. You have to copy and paste this one, \
NO SHARING... I will leave it in the comments so it is easier \
for you to copy.'
# Split into Words
test_text = words(test_text)
# Loop through
for word in test_text:
if correct(word) == word.capitalize():
print word.capitalize()
else:
y = 'CORRECTION TO'
print word.capitalize(), y, correct(word)
##################################################################
# Import Data
Dilberate = words(
file('/home/dan/Spark_Files/Web/Spellings_and_Errors.txt').read())
# Create a Subset of 1000 Words
Dilberate2 = Dilberate[0:100]
# Loop through
n = 0
p = 0
list_corrections = []
for word in Dilberate2:
start_time = time()
correction = correct(word)
if correction == word.capitalize():
p += 1
else:
y = 'CORRECTION TO'
list_corrections.append(
[word.capitalize(), y, correction, time() - start_time])
n += 1
print('Number of Corrections: %i Number of Correctly Spelt Words: %i') % (n, p)
| {
"content_hash": "18fa071cc4caba7e7602c2bff8184cec",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 89,
"avg_line_length": 40.8828125,
"alnum_prop": 0.5769157271163768,
"repo_name": "dandxy89/ExperiKeras",
"id": "0c52701aac227a2c548a43b39a57d9ed41700bf6",
"size": "5530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Learning_Python/Spelling_Corrector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "214058"
},
{
"name": "Shell",
"bytes": "3069"
}
],
"symlink_target": ""
} |
from datetime import time
from django.conf import settings
def getTeamDaysOff(team):
days_off = team.days_off.split(',')
days_off = [int(day) for day in days_off]
return days_off
def checkIfDayOfWeekInDayOfWeekList(date, week_day_list):
    return date.isoweekday() in week_day_list
def get_dinner_time():
return (time(settings.DINNER_TIME[0]), time(settings.DINNER_TIME[1]))
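# Illustrative only: a stand-in team object with days_off '6,7' (Saturday and
# Sunday in isoweekday terms) shows how the helpers above fit together.
def _days_off_example():
    from datetime import date
    class _FakeTeam(object):
        days_off = '6,7'
    days = getTeamDaysOff(_FakeTeam())
    assert days == [6, 7]
    assert checkIfDayOfWeekInDayOfWeekList(date(2015, 1, 3), days)  # a Saturday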
| {
"content_hash": "4aadb634ad751774b39dff66ceb531d0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 24.41176470588235,
"alnum_prop": 0.7012048192771084,
"repo_name": "desecho/tickets",
"id": "7f5733374aaaeccb78720e7db6964e14f121010b",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tickets_project/tickets/days_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2774"
},
{
"name": "HTML",
"bytes": "14662"
},
{
"name": "JavaScript",
"bytes": "10558"
},
{
"name": "Python",
"bytes": "79097"
},
{
"name": "Shell",
"bytes": "112"
}
],
"symlink_target": ""
} |
class Request(object):
pass
| {
"content_hash": "7c81876ccba4f5c2b9baf681003f51c4",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 22,
"avg_line_length": 16,
"alnum_prop": 0.6875,
"repo_name": "github/codeql",
"id": "c7ac65eaf056d7bc3b6bcef4de454b9f951d93e9",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/ql/test/query-tests/Security/lib/twisted/web/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
} |
from google.protobuf import field_mask_pb2 # type: ignore
import proto # type: ignore
from google.cloud.tasks_v2.types import queue as gct_queue
from google.cloud.tasks_v2.types import task as gct_task
__protobuf__ = proto.module(
package="google.cloud.tasks.v2",
manifest={
"ListQueuesRequest",
"ListQueuesResponse",
"GetQueueRequest",
"CreateQueueRequest",
"UpdateQueueRequest",
"DeleteQueueRequest",
"PurgeQueueRequest",
"PauseQueueRequest",
"ResumeQueueRequest",
"ListTasksRequest",
"ListTasksResponse",
"GetTaskRequest",
"CreateTaskRequest",
"DeleteTaskRequest",
"RunTaskRequest",
},
)
class ListQueuesRequest(proto.Message):
r"""Request message for
[ListQueues][google.cloud.tasks.v2.CloudTasks.ListQueues].
Attributes:
parent (str):
Required. The location name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID``
filter (str):
``filter`` can be used to specify a subset of queues. Any
[Queue][google.cloud.tasks.v2.Queue] field can be used as a
            filter and several operators are supported. For example:
``<=, <, >=, >, !=, =, :``. The filter syntax is the same as
described in `Stackdriver's Advanced Logs
Filters <https://cloud.google.com/logging/docs/view/advanced_filters>`__.
Sample filter "state: PAUSED".
Note that using filters might cause fewer queues than the
requested page_size to be returned.
page_size (int):
Requested page size.
The maximum page size is 9800. If unspecified, the page size
will be the maximum. Fewer queues than requested might be
returned, even if more queues exist; use the
[next_page_token][google.cloud.tasks.v2.ListQueuesResponse.next_page_token]
in the response to determine if more queues exist.
page_token (str):
A token identifying the page of results to return.
To request the first page results, page_token must be empty.
To request the next page of results, page_token must be the
value of
[next_page_token][google.cloud.tasks.v2.ListQueuesResponse.next_page_token]
returned from the previous call to
[ListQueues][google.cloud.tasks.v2.CloudTasks.ListQueues]
method. It is an error to switch the value of the
[filter][google.cloud.tasks.v2.ListQueuesRequest.filter]
while iterating through pages.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
filter = proto.Field(
proto.STRING,
number=2,
)
page_size = proto.Field(
proto.INT32,
number=3,
)
page_token = proto.Field(
proto.STRING,
number=4,
)
class ListQueuesResponse(proto.Message):
r"""Response message for
[ListQueues][google.cloud.tasks.v2.CloudTasks.ListQueues].
Attributes:
queues (Sequence[google.cloud.tasks_v2.types.Queue]):
The list of queues.
next_page_token (str):
A token to retrieve next page of results.
To return the next page of results, call
[ListQueues][google.cloud.tasks.v2.CloudTasks.ListQueues]
with this value as the
[page_token][google.cloud.tasks.v2.ListQueuesRequest.page_token].
If the next_page_token is empty, there are no more results.
The page token is valid for only 2 hours.
"""
@property
def raw_page(self):
return self
queues = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gct_queue.Queue,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetQueueRequest(proto.Message):
r"""Request message for
[GetQueue][google.cloud.tasks.v2.CloudTasks.GetQueue].
Attributes:
name (str):
Required. The resource name of the queue. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateQueueRequest(proto.Message):
r"""Request message for
[CreateQueue][google.cloud.tasks.v2.CloudTasks.CreateQueue].
Attributes:
parent (str):
Required. The location name in which the queue will be
created. For example:
``projects/PROJECT_ID/locations/LOCATION_ID``
The list of allowed locations can be obtained by calling
Cloud Tasks' implementation of
[ListLocations][google.cloud.location.Locations.ListLocations].
queue (google.cloud.tasks_v2.types.Queue):
Required. The queue to create.
[Queue's name][google.cloud.tasks.v2.Queue.name] cannot be
the same as an existing queue.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
queue = proto.Field(
proto.MESSAGE,
number=2,
message=gct_queue.Queue,
)
class UpdateQueueRequest(proto.Message):
r"""Request message for
[UpdateQueue][google.cloud.tasks.v2.CloudTasks.UpdateQueue].
Attributes:
queue (google.cloud.tasks_v2.types.Queue):
Required. The queue to create or update.
The queue's [name][google.cloud.tasks.v2.Queue.name] must be
specified.
Output only fields cannot be modified using UpdateQueue. Any
value specified for an output only field will be ignored.
The queue's [name][google.cloud.tasks.v2.Queue.name] cannot
be changed.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
A mask used to specify which fields of the
queue are being updated.
If empty, then all fields will be updated.
"""
queue = proto.Field(
proto.MESSAGE,
number=1,
message=gct_queue.Queue,
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteQueueRequest(proto.Message):
r"""Request message for
[DeleteQueue][google.cloud.tasks.v2.CloudTasks.DeleteQueue].
Attributes:
name (str):
Required. The queue name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
"""
name = proto.Field(
proto.STRING,
number=1,
)
class PurgeQueueRequest(proto.Message):
r"""Request message for
[PurgeQueue][google.cloud.tasks.v2.CloudTasks.PurgeQueue].
Attributes:
name (str):
Required. The queue name. For example:
``projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID``
"""
name = proto.Field(
proto.STRING,
number=1,
)
class PauseQueueRequest(proto.Message):
r"""Request message for
[PauseQueue][google.cloud.tasks.v2.CloudTasks.PauseQueue].
Attributes:
name (str):
Required. The queue name. For example:
``projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID``
"""
name = proto.Field(
proto.STRING,
number=1,
)
class ResumeQueueRequest(proto.Message):
r"""Request message for
[ResumeQueue][google.cloud.tasks.v2.CloudTasks.ResumeQueue].
Attributes:
name (str):
Required. The queue name. For example:
``projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID``
"""
name = proto.Field(
proto.STRING,
number=1,
)
class ListTasksRequest(proto.Message):
r"""Request message for listing tasks using
[ListTasks][google.cloud.tasks.v2.CloudTasks.ListTasks].
Attributes:
parent (str):
Required. The queue name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
response_view (google.cloud.tasks_v2.types.Task.View):
The response_view specifies which subset of the
[Task][google.cloud.tasks.v2.Task] will be returned.
By default response_view is
[BASIC][google.cloud.tasks.v2.Task.View.BASIC]; not all
information is retrieved by default because some data, such
as payloads, might be desirable to return only when needed
because of its large size or because of the sensitivity of
data that it contains.
Authorization for
[FULL][google.cloud.tasks.v2.Task.View.FULL] requires
``cloudtasks.tasks.fullView`` `Google
IAM <https://cloud.google.com/iam/>`__ permission on the
[Task][google.cloud.tasks.v2.Task] resource.
page_size (int):
Maximum page size.
Fewer tasks than requested might be returned, even if more
tasks exist; use
[next_page_token][google.cloud.tasks.v2.ListTasksResponse.next_page_token]
in the response to determine if more tasks exist.
The maximum page size is 1000. If unspecified, the page size
will be the maximum.
page_token (str):
A token identifying the page of results to return.
To request the first page results, page_token must be empty.
To request the next page of results, page_token must be the
value of
[next_page_token][google.cloud.tasks.v2.ListTasksResponse.next_page_token]
returned from the previous call to
[ListTasks][google.cloud.tasks.v2.CloudTasks.ListTasks]
method.
The page token is valid for only 2 hours.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
response_view = proto.Field(
proto.ENUM,
number=2,
enum=gct_task.Task.View,
)
page_size = proto.Field(
proto.INT32,
number=3,
)
page_token = proto.Field(
proto.STRING,
number=4,
)
class ListTasksResponse(proto.Message):
r"""Response message for listing tasks using
[ListTasks][google.cloud.tasks.v2.CloudTasks.ListTasks].
Attributes:
tasks (Sequence[google.cloud.tasks_v2.types.Task]):
The list of tasks.
next_page_token (str):
A token to retrieve next page of results.
To return the next page of results, call
[ListTasks][google.cloud.tasks.v2.CloudTasks.ListTasks] with
this value as the
[page_token][google.cloud.tasks.v2.ListTasksRequest.page_token].
If the next_page_token is empty, there are no more results.
"""
@property
def raw_page(self):
return self
tasks = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gct_task.Task,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetTaskRequest(proto.Message):
r"""Request message for getting a task using
[GetTask][google.cloud.tasks.v2.CloudTasks.GetTask].
Attributes:
name (str):
Required. The task name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID``
response_view (google.cloud.tasks_v2.types.Task.View):
The response_view specifies which subset of the
[Task][google.cloud.tasks.v2.Task] will be returned.
By default response_view is
[BASIC][google.cloud.tasks.v2.Task.View.BASIC]; not all
information is retrieved by default because some data, such
as payloads, might be desirable to return only when needed
because of its large size or because of the sensitivity of
data that it contains.
Authorization for
[FULL][google.cloud.tasks.v2.Task.View.FULL] requires
``cloudtasks.tasks.fullView`` `Google
IAM <https://cloud.google.com/iam/>`__ permission on the
[Task][google.cloud.tasks.v2.Task] resource.
"""
name = proto.Field(
proto.STRING,
number=1,
)
response_view = proto.Field(
proto.ENUM,
number=2,
enum=gct_task.Task.View,
)
class CreateTaskRequest(proto.Message):
r"""Request message for
[CreateTask][google.cloud.tasks.v2.CloudTasks.CreateTask].
Attributes:
parent (str):
Required. The queue name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
The queue must already exist.
task (google.cloud.tasks_v2.types.Task):
Required. The task to add.
Task names have the following format:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID``.
The user can optionally specify a task
[name][google.cloud.tasks.v2.Task.name]. If a name is not
specified then the system will generate a random unique task
id, which will be set in the task returned in the
[response][google.cloud.tasks.v2.Task.name].
If [schedule_time][google.cloud.tasks.v2.Task.schedule_time]
is not set or is in the past then Cloud Tasks will set it to
the current time.
Task De-duplication:
Explicitly specifying a task ID enables task de-duplication.
If a task's ID is identical to that of an existing task or a
task that was deleted or executed recently then the call
will fail with
[ALREADY_EXISTS][google.rpc.Code.ALREADY_EXISTS]. If the
task's queue was created using Cloud Tasks, then another
task with the same name can't be created for ~1hour after
the original task was deleted or executed. If the task's
queue was created using queue.yaml or queue.xml, then
another task with the same name can't be created for ~9days
after the original task was deleted or executed.
Because there is an extra lookup cost to identify duplicate
task names, these
[CreateTask][google.cloud.tasks.v2.CloudTasks.CreateTask]
calls have significantly increased latency. Using hashed
strings for the task id or for the prefix of the task id is
recommended. Choosing task ids that are sequential or have
sequential prefixes, for example using a timestamp, causes
an increase in latency and error rates in all task commands.
The infrastructure relies on an approximately uniform
distribution of task ids to store and serve tasks
efficiently.
response_view (google.cloud.tasks_v2.types.Task.View):
The response_view specifies which subset of the
[Task][google.cloud.tasks.v2.Task] will be returned.
By default response_view is
[BASIC][google.cloud.tasks.v2.Task.View.BASIC]; not all
information is retrieved by default because some data, such
as payloads, might be desirable to return only when needed
because of its large size or because of the sensitivity of
data that it contains.
Authorization for
[FULL][google.cloud.tasks.v2.Task.View.FULL] requires
``cloudtasks.tasks.fullView`` `Google
IAM <https://cloud.google.com/iam/>`__ permission on the
[Task][google.cloud.tasks.v2.Task] resource.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
task = proto.Field(
proto.MESSAGE,
number=2,
message=gct_task.Task,
)
response_view = proto.Field(
proto.ENUM,
number=3,
enum=gct_task.Task.View,
)
class DeleteTaskRequest(proto.Message):
r"""Request message for deleting a task using
[DeleteTask][google.cloud.tasks.v2.CloudTasks.DeleteTask].
Attributes:
name (str):
Required. The task name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID``
"""
name = proto.Field(
proto.STRING,
number=1,
)
class RunTaskRequest(proto.Message):
r"""Request message for forcing a task to run now using
[RunTask][google.cloud.tasks.v2.CloudTasks.RunTask].
Attributes:
name (str):
Required. The task name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID``
response_view (google.cloud.tasks_v2.types.Task.View):
The response_view specifies which subset of the
[Task][google.cloud.tasks.v2.Task] will be returned.
By default response_view is
[BASIC][google.cloud.tasks.v2.Task.View.BASIC]; not all
information is retrieved by default because some data, such
as payloads, might be desirable to return only when needed
because of its large size or because of the sensitivity of
data that it contains.
Authorization for
[FULL][google.cloud.tasks.v2.Task.View.FULL] requires
``cloudtasks.tasks.fullView`` `Google
IAM <https://cloud.google.com/iam/>`__ permission on the
[Task][google.cloud.tasks.v2.Task] resource.
"""
name = proto.Field(
proto.STRING,
number=1,
)
response_view = proto.Field(
proto.ENUM,
number=2,
enum=gct_task.Task.View,
)
__all__ = tuple(sorted(__protobuf__.manifest))
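# Illustrative only: request messages can be built locally without any
# credentials. The resource names and URL below are made-up placeholders.
if __name__ == "__main__":
    list_req = ListQueuesRequest(
        parent="projects/my-project/locations/us-central1",
        page_size=10,
    )
    task_req = CreateTaskRequest(
        parent="projects/my-project/locations/us-central1/queues/my-queue",
        task={"http_request": {"url": "https://example.com/handler"}},
    )
    print(list_req)
    print(task_req)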
| {
"content_hash": "cbc9b33f4dd20fe49cbe44802f42cc67",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 88,
"avg_line_length": 32.8929889298893,
"alnum_prop": 0.6147632936953108,
"repo_name": "googleapis/python-tasks",
"id": "f4cf7e0ae8eab22048b3e5e5cae797d88eaa72ae",
"size": "18428",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/tasks_v2/types/cloudtasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1867840"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
from lollipop.errors import ValidationError, ValidationErrorBuilder, \
ErrorMessagesMixin, merge_errors
from lollipop.utils import is_sequence, is_mapping, make_context_aware, \
constant, identity, OpenStruct, DictWithDefault
from lollipop.compat import string_types, int_types, iteritems, OrderedDict
import datetime
__all__ = [
'MISSING',
'Type',
'Any',
'String',
'Integer',
'Float',
'Boolean',
'Date',
'DateTime',
'Time',
'List',
'Tuple',
'Dict',
'OneOf',
'type_name_hint',
'dict_value_hint',
'Field',
'AttributeField',
'IndexField',
'MethodField',
'FunctionField',
'Object',
'Modifier',
'Constant',
'Optional',
'LoadOnly',
'DumpOnly',
'Transform',
'validated_type',
]
class MissingType(object):
def __repr__(self):
return '<MISSING>'
#: Special singleton value (like None) to represent case when value is missing.
MISSING = MissingType()
class ValidatorCollection(object):
def __init__(self, validators):
self._validators = [make_context_aware(validator, 1)
for validator in validators]
def append(self, validator):
self._validators.append(make_context_aware(validator, 1))
def insert(self, idx, validator):
self._validators.insert(idx, make_context_aware(validator, 1))
def __len__(self):
return len(self._validators)
def __getitem__(self, idx):
return self._validators[idx]
def __setitem__(self, idx, validator):
self._validators[idx] = make_context_aware(validator, 1)
def __delitem__(self, idx):
del self._validators[idx]
def __iter__(self):
for validator in self._validators:
yield validator
class Type(ErrorMessagesMixin, object):
"""Base class for defining data types.
:param string name: Name of type or None for unnamed types
:param string description: Description of type or None
:param list validate: A validator or list of validators for this data type.
Validator is a callable that takes serialized data and raises
:exc:`~lollipop.errors.ValidationError` if data is invalid.
Validator return value is ignored.
:param dict error_messages: Mapping of error message keys to error message text.
Error messages can contain placeholders in standard string.format() format
(e.g. "Invalid value: {value}"). Consult particular type's documentation on
available data.
Error message keys:
* invalid - value is invalid. Interpolation data:
* data - actual value
* required - value is required
"""
default_error_messages = {
'invalid': 'Invalid value type',
'required': 'Value is required',
}
def __init__(self, name=None, description=None, validate=None, *args, **kwargs):
super(Type, self).__init__(*args, **kwargs)
if validate is None:
validate = []
elif callable(validate):
validate = [validate]
self.name = name
self.description = description
self.validators = ValidatorCollection(validate)
def validate(self, data, context=None):
"""Takes serialized data and returns validation errors or None.
:param data: Data to validate.
:param context: Context data.
:returns: validation errors or None
"""
try:
self.load(data, context)
return None
except ValidationError as ve:
return ve.messages
def load(self, data, context=None):
"""Deserialize data from primitive types. Raises
:exc:`~lollipop.errors.ValidationError` if data is invalid.
:param data: Data to deserialize.
:param context: Context data.
:returns: Loaded data
:raises: :exc:`~lollipop.errors.ValidationError`
"""
errors_builder = ValidationErrorBuilder()
for validator in self.validators:
try:
validator(data, context)
except ValidationError as ve:
errors_builder.add_errors(ve.messages)
errors_builder.raise_errors()
return data
def dump(self, value, context=None):
"""Serialize data to primitive types. Raises
:exc:`~lollipop.errors.ValidationError` if data is invalid.
:param value: Value to serialize.
:param context: Context data.
:returns: Serialized data.
:raises: :exc:`~lollipop.errors.ValidationError`
"""
return value
def __repr__(self):
return '<{klass}>'.format(klass=self.__class__.__name__)
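# Sketch of plugging a validator into a bare Type (illustrative only): the
# validator simply rejects empty values with a ValidationError.
def _validator_example():  # pragma: no cover - documentation helper
    def non_empty(value, context=None):
        if not value:
            raise ValidationError('Value should not be empty')
    assert Type(validate=non_empty).load('x') == 'x'
    assert Type(validate=non_empty).validate('') is not None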
class Any(Type):
"""Any type. Does not transform/validate given data."""
pass
class Number(Type):
"""Any number type (integer/float).
Error message keys:
* invalid - invalid value type. Interpolation data:
* data - actual value
"""
num_type = float
default_error_messages = {
'invalid': 'Value should be number',
}
_invalid_types = string_types + (bool,)
def _normalize(self, value):
try:
return self.num_type(value)
except (TypeError, ValueError):
self._fail('invalid', data=value)
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if isinstance(data, self._invalid_types):
self._fail('invalid')
return super(Number, self).load(self._normalize(data), *args, **kwargs)
def dump(self, value, *args, **kwargs):
if value is MISSING or value is None:
self._fail('required')
return super(Number, self).dump(self._normalize(value), *args, **kwargs)
class Integer(Number):
"""An integer type.
Error message keys:
* invalid - invalid value type. Interpolation data:
* data - actual value
"""
num_type = int
default_error_messages = {
'invalid': 'Value should be integer'
}
class Float(Number):
"""A float type.
Error message keys:
* invalid - invalid value type. Interpolation data:
* data - actual value
"""
num_type = float
default_error_messages = {
'invalid': 'Value should be float'
}
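# Usage sketch: Number subclasses normalize numeric input via num_type and
# reject strings and booleans on load.
#
#     Integer().load(42)     # => 42
#     Integer().load(12.7)   # => 12 (normalized via int())
#     Integer().load('12')   # raises ValidationError ('Value should be integer')
#     Float().load(5)        # => 5.0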
class String(Type):
"""A string type.
Error message keys:
* invalid - invalid value type. Interpolation data:
* data - actual value
"""
default_error_messages = {
'invalid': 'Value should be string',
}
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if not isinstance(data, string_types):
self._fail('invalid')
return super(String, self).load(data, *args, **kwargs)
def dump(self, value, *args, **kwargs):
if value is MISSING or value is None:
self._fail('required')
if not isinstance(value, string_types):
self._fail('invalid')
return super(String, self).dump(str(value), *args, **kwargs)
class Boolean(Type):
"""A boolean type.
Error message keys:
* invalid - invalid value type. Interpolation data:
* data - actual value
"""
default_error_messages = {
'invalid': 'Value should be boolean',
}
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if not isinstance(data, bool):
self._fail('invalid', data=data)
return super(Boolean, self).load(data, *args, **kwargs)
def dump(self, value, *args, **kwargs):
if value is MISSING or value is None:
self._fail('required')
if not isinstance(value, bool):
self._fail('invalid', data=value)
return super(Boolean, self).dump(bool(value), *args, **kwargs)
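# Usage sketch: String and Boolean are strict about value types on both load and dump.
#
#     String().load('foo')    # => 'foo'
#     String().load(123)      # raises ValidationError ('Value should be string')
#     Boolean().load(True)    # => True
#     Boolean().load('yes')   # raises ValidationError ('Value should be boolean')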
class DateTime(Type):
"""A date and time type which serializes into string.
:param str format: Format string (see :func:`datetime.datetime.strptime`) or
one of predefined format names (e.g. 'iso8601', 'rfc3339', etc.
See :const:`~DateTime.FORMATS`)
:param kwargs: Same keyword arguments as for :class:`Type`.
Instead of specifying format strings explicitly, you can instead use one of
predefined strings for formats. E.g. `DateTime(format='rfc3339')`.
Predefined formats:
* iso - shortcut for iso8601 (default)
* iso8601 - %Y-%m-%dT%H:%M:%S%Z (e.g. "2015-12-31T14:59:59PDT")
* rfc - shortcut for rfc3339
* rfc3339 - %Y-%m-%dT%H:%M:%S%Z (e.g. "2015-12-31T14:59:59UTC")
* rfc822 - %d %b %y %H:%M:%S %Z (e.g. "31 Dec 2015 14:59:59 PDT")
Error message keys:
* invalid - invalid datetime value (on dump). Interpolation data:
* data - actual value
* invalid_type - value is not a string (on load). Interpolation data:
* data - actual value
* invalid_format - string does not match datetime format (on load).
Interpolation data:
* data - actual value
* format - format string
"""
FORMATS = {
'iso': '%Y-%m-%dT%H:%M:%S%Z', # shortcut for iso8601
'iso8601': '%Y-%m-%dT%H:%M:%S%Z',
'rfc': '%Y-%m-%dT%H:%M:%S%Z', # shortcut for rfc3339
'rfc3339': '%Y-%m-%dT%H:%M:%S%Z',
'rfc822': '%d %b %y %H:%M:%S %Z',
}
DEFAULT_FORMAT = 'iso'
default_error_messages = {
'invalid': 'Invalid datetime value',
'invalid_type': 'Value should be string',
'invalid_format': 'Value should match datetime format',
}
def __init__(self, format=None, *args, **kwargs):
super(DateTime, self).__init__(*args, **kwargs)
self.format = format or self.DEFAULT_FORMAT
def _convert_value(self, value):
return value
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if not isinstance(data, string_types):
self._fail('invalid_type', data=data)
format_str = self.FORMATS.get(self.format, self.format)
try:
date = self._convert_value(datetime.datetime.strptime(data, format_str))
return super(DateTime, self).load(date, *args, **kwargs)
except ValueError:
self._fail('invalid_format', data=data, format=format_str)
def dump(self, value, *args, **kwargs):
if value is MISSING or value is None:
self._fail('required')
format_str = self.FORMATS.get(self.format, self.format)
try:
return super(DateTime, self)\
.dump(value.strftime(format_str), *args, **kwargs)
except (AttributeError, ValueError):
self._fail('invalid', data=value)
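# Usage sketch: DateTime serializes datetime objects with the configured format
# string (format names refer to the FORMATS table above); for naive datetimes,
# %Z renders as an empty string.
#
#     import datetime
#     DateTime().dump(datetime.datetime(2015, 12, 31, 14, 59, 59))
#     # => '2015-12-31T14:59:59'
#     DateTime().load('12/31/2015')
#     # raises ValidationError ('Value should match datetime format')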
class Date(DateTime):
"""A date type which serializes into string.
:param str format: Format string (see :func:`datetime.datetime.strptime`) or
one of predefined format names (e.g. 'iso8601', 'rfc3339', etc.
See :const:`~Date.FORMATS`)
:param kwargs: Same keyword arguments as for :class:`Type`.
Predefined formats:
* iso - shortcut for iso8601 (default)
* iso8601 - %Y-%m-%d (e.g. "2015-12-31")
* rfc - shortcut for rfc3339
* rfc3339 - %Y-%m-%d (e.g. "2015-12-31")
* rfc822 - %d %b %y (e.g. "31 Dec 2015")
Error message keys:
* invalid - invalid date value (on dump). Interpolation data:
* data - actual value
* invalid_type - value is not a string (on load). Interpolation data:
* data - actual value
* invalid_format - string does not match date format (on load).
Interpolation data:
* data - actual value
* format - format string
"""
FORMATS = {
'iso': '%Y-%m-%d', # shortcut for iso8601
'iso8601': '%Y-%m-%d',
'rfc': '%Y-%m-%d', # shortcut for rfc3339
'rfc3339': '%Y-%m-%d',
'rfc822': '%d %b %y',
}
DEFAULT_FORMAT = 'iso'
default_error_messages = {
'invalid': 'Invalid date value',
'invalid_type': 'Value should be string',
'invalid_format': 'Value should match date format',
}
def _convert_value(self, value):
return value.date()
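# Usage sketch: Date parses with the date-only formats above and converts the
# parsed datetime to a date object.
#
#     import datetime
#     Date().dump(datetime.date(2015, 12, 31))   # => '2015-12-31'
#     Date().load('2015-12-31')                  # => datetime.date(2015, 12, 31)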
class Time(DateTime):
"""A time type which serializes into string.
:param str format: Format string (see :func:`datetime.datetime.strptime`) or
one of predefined format names (e.g. 'iso8601', 'rfc3339', etc.)
:param kwargs: Same keyword arguments as for :class:`Type`.
Predefined formats:
* iso - shortcut for iso8601 (default)
* iso8601 - %H:%M:%S (e.g. "14:59:59")
Error message keys:
* invalid - invalid time value (on dump). Interpolation data:
* data - actual value
* invalid_type - value is not a string (on load). Interpolation data:
* data - actual value
* invalid_format - string does not match date format (on load).
Interpolation data:
* data - actual value
* format - format string
"""
FORMATS = {
'iso': '%H:%M:%S', # shortcut for iso8601
'iso8601': '%H:%M:%S',
}
DEFAULT_FORMAT = 'iso'
default_error_messages = {
'invalid': 'Invalid time value',
'invalid_type': 'Value should be string',
'invalid_format': 'Value should match time format',
}
def _convert_value(self, value):
return value.time()
class List(Type):
"""A homogenous list type.
Example: ::
List(String()).load(['foo', 'bar', 'baz'])
:param Type item_type: Type of list elements.
:param kwargs: Same keyword arguments as for :class:`Type`.
Error message keys:
* invalid - invalid list value. Interpolation data:
* data - actual value
"""
default_error_messages = {
'invalid': 'Value should be list',
}
def __init__(self, item_type, **kwargs):
super(List, self).__init__(**kwargs)
self.item_type = item_type
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if not is_sequence(data) or isinstance(data, string_types):
self._fail('invalid', data=data)
errors_builder = ValidationErrorBuilder()
items = []
for idx, item in enumerate(data):
try:
items.append(self.item_type.load(item, *args, **kwargs))
except ValidationError as ve:
errors_builder.add_errors({idx: ve.messages})
errors_builder.raise_errors()
return super(List, self).load(items, *args, **kwargs)
def dump(self, value, *args, **kwargs):
if value is MISSING or value is None:
self._fail('required')
if not is_sequence(value) or isinstance(value, string_types):
            self._fail('invalid', data=value)
errors_builder = ValidationErrorBuilder()
items = []
for idx, item in enumerate(value):
try:
items.append(self.item_type.dump(item, *args, **kwargs))
except ValidationError as ve:
errors_builder.add_errors({idx: ve.messages})
errors_builder.raise_errors()
return super(List, self).dump(items, *args, **kwargs)
def __repr__(self):
return '<{klass} of {item_type}>'.format(
klass=self.__class__.__name__,
item_type=repr(self.item_type),
)
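# Usage sketch: List validates every item and reports errors keyed by item index.
#
#     List(Integer()).load([1, 2, 3])     # => [1, 2, 3]
#     List(Integer()).load([1, 'x', 3])
#     # raises ValidationError with errors roughly {1: 'Value should be integer'}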
class Tuple(Type):
"""A heterogenous list type.
Example: ::
Tuple([String(), Integer(), Boolean()]).load(['foo', 123, False])
# => ('foo', 123, False)
:param list item_types: List of item types.
:param kwargs: Same keyword arguments as for :class:`Type`.
Error message keys:
* invalid - invalid list value. Interpolation data:
* data - actual value
* invalid_length: tuple has invalid length: Interpolation data:
* expected_length
* actual_length
"""
default_error_messages = {
'invalid': 'Value should be list',
'invalid_length': 'Value length should be {expected_length}',
}
def __init__(self, item_types, **kwargs):
super(Tuple, self).__init__(**kwargs)
self.item_types = item_types
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if not is_sequence(data):
self._fail('invalid', data=data)
if len(data) != len(self.item_types):
self._fail('invalid_length',
expected_length=len(self.item_types),
actual_length=len(data))
errors_builder = ValidationErrorBuilder()
result = []
for idx, (item_type, item) in enumerate(zip(self.item_types, data)):
try:
result.append(item_type.load(item, *args, **kwargs))
except ValidationError as ve:
errors_builder.add_errors({idx: ve.messages})
errors_builder.raise_errors()
return tuple(super(Tuple, self).load(result, *args, **kwargs))
def dump(self, value, *args, **kwargs):
if value is MISSING or value is None:
self._fail('required')
if not is_sequence(value):
self._fail('invalid', data=value)
if len(value) != len(self.item_types):
self._fail('invalid_length',
expected_length=len(self.item_types),
actual_length=len(value))
errors_builder = ValidationErrorBuilder()
result = []
for idx, (item_type, item) in enumerate(zip(self.item_types, value)):
try:
result.append(item_type.dump(item, *args, **kwargs))
except ValidationError as ve:
errors_builder.add_errors({idx: ve.messages})
errors_builder.raise_errors()
return super(Tuple, self).dump(result, *args, **kwargs)
def __repr__(self):
return '<{klass} of {item_types}>'.format(
klass=self.__class__.__name__,
item_types=repr(self.item_types),
)
def type_name_hint(data):
"""Returns type name of given value.
To be used as a type hint in :class:`OneOf`.
"""
return data.__class__.__name__
def dict_value_hint(key, mapper=None):
"""Returns a function that takes a dictionary and returns value of
particular key. The returned value can be optionally processed by `mapper`
function.
To be used as a type hint in :class:`OneOf`.
"""
if mapper is None:
mapper = identity
def hinter(data):
return mapper(data.get(key))
return hinter
class OneOf(Type):
"""Type that alternates between several other types.
There are two ways to use it:
* with sequence of types
* with mapping of types
When used with sequence of types, it tries to load/dump data with each
type in a sequence until operation succeeds, proceeding to next type if
operation fails.
Types sequence example: ::
ValueType = OneOf([String(), List(String())])
        ValueType.dump('foo') # => 'foo'
ValueType.dump(['foo', 'bar']) # => ['foo', 'bar']
When used with a mapping of types, it requires two hint functions to be
provided: one to determine type name for dumped object and other one to
determine type name for loaded data. E.g. dump hint can be based on object
class. Load hint can be done either by inspecting data structure or using
injected data: you can modify schema of your objects (assuming your data
is objects) and add extra field called e.g. "type" and put some constant
there. Then you can consult that field value to know what type to use for
loading.
Hint function example: ::
def dump_hint(data):
return data.__class__.__name__
def load_hint(key):
def hinter(data):
return data.get(key)
return hinter
Type mapping example: ::
from collections import namedtuple
Foo = namedtuple('Foo', ['foo'])
Bar = namedtuple('Bar', ['bar'])
FooType = Object({'foo': String()}, constructor=Foo)
BarType = Object({'bar': Integer()}, constructor=Bar)
def object_with_type(name, subject_type):
return Object(subject_type, {'type': DumpOnly(Constant(name))},
constructor=subject_type.constructor)
FooBarType = OneOf({
'Foo': object_with_type('Foo', FooType),
'Bar': object_with_type('Bar', BarType),
}, dump_hint=type_name_hint, load_hint=dict_value_hint('type'))
List(FooBarType).dump([Foo(foo='hello'), Bar(bar=123)])
# => [{'type': 'Foo', 'foo': 'hello'}, {'type': 'Bar', 'bar': 123}]
List(FooBarType).load([{'type': 'Foo', 'foo': 'hello'},
{'type': 'Bar', 'bar': 123}])
# => [Foo(foo='hello'), Bar(bar=123)]
    Using hint functions can be handier because, when trying types in sequence,
    it is impossible to distinguish between data that is simply of a different
    type and data of the correct type that is merely invalid.
Example: ::
NameType = String(validate=Length(max=32))
ValueType = OneOf([NameType, List(NameType)])
# Most likely if you specify long string, you will get error that
# data is of invalid type.
# Here is an alternative:
def value_type_hint(data):
if isinstance(data, (str, unicode)):
return 'string'
elif isinstance(data, collections.Sequence):
return 'list-of-names'
else:
return None
ValueType = OneOf(
{
'name': NameType,
'list-of-names': List(NameType),
},
load_hint=value_type_hint,
dump_hint=value_type_hint,
)
Error message keys:
* invalid - invalid value type. Interpolation data:
* data - actual value
* unknown_type_id - unknown type ID. Interpolation data:
* data - actual value
* type_id - hinted type ID
* no_type_matched - in case of sequence of types error when no types matched.
Interpolation data:
* value
"""
default_error_messages = {
'invalid': 'Invalid data',
'unknown_type_id': 'Unknown type ID: {type_id}',
'no_type_matched': 'No type matched',
}
def __init__(self, types,
load_hint=type_name_hint,
dump_hint=type_name_hint,
*args, **kwargs):
super(OneOf, self).__init__(*args, **kwargs)
self.types = types
self.load_hint = load_hint
self.dump_hint = dump_hint
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if is_mapping(self.types) and self.load_hint:
type_id = self.load_hint(data)
if type_id not in self.types:
self._fail('unknown_type_id', data=data, type_id=type_id)
item_type = self.types[type_id]
result = item_type.load(data, *args, **kwargs)
return super(OneOf, self).load(result, *args, **kwargs)
else:
for item_type in (self.types.values()
if is_mapping(self.types) else self.types):
try:
result = item_type.load(data, *args, **kwargs)
return super(OneOf, self).load(result, *args, **kwargs)
except ValidationError as ve:
pass
self._fail('no_type_matched', data=data)
def dump(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if is_mapping(self.types) and self.dump_hint:
type_id = self.dump_hint(data)
if type_id not in self.types:
self._fail('unknown_type_id', data=data, type_id=type_id)
item_type = self.types[type_id]
result = item_type.dump(data, *args, **kwargs)
return super(OneOf, self).dump(result, *args, **kwargs)
else:
for item_type in (self.types.values()
if is_mapping(self.types) else self.types):
try:
result = item_type.dump(data, *args, **kwargs)
return super(OneOf, self).dump(result, *args, **kwargs)
except ValidationError as ve:
pass
self._fail('no_type_matched', data=data)
def __repr__(self):
return '<{klass} {types}>'.format(
klass=self.__class__.__name__,
types=repr(self.types),
)
class Dict(Type):
"""A dict type. You can specify either a single type for all dict values
or provide a dict-like mapping object that will return proper Type instance
for each given dict key.
Example: ::
Dict(Integer()).load({'key0': 1, 'key1': 5, 'key2': 15})
Dict({'foo': String(), 'bar': Integer()}).load({
'foo': 'hello', 'bar': 123,
})
:param dict value_types: A single :class:`Type` for all dict values or mapping
of allowed keys to :class:`Type` instances (defaults to :class:`Any`)
:param Type key_type: Type for dictionary keys (defaults to :class:`Any`).
Can be used to either transform or validate dictionary keys.
:param kwargs: Same keyword arguments as for :class:`Type`.
Error message keys:
* invalid - invalid value type. Interpolation data:
* data - actual value
"""
default_error_messages = {
'invalid': 'Value should be dict',
}
def __init__(self, value_types=None, key_type=None, **kwargs):
super(Dict, self).__init__(**kwargs)
if value_types is None:
value_types = DictWithDefault(default=Any())
elif isinstance(value_types, Type):
value_types = DictWithDefault(default=value_types)
self.value_types = value_types
self.key_type = key_type or Any()
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if not is_mapping(data):
self._fail('invalid', data=data)
errors_builder = ValidationErrorBuilder()
result = {}
for k, v in iteritems(data):
try:
k = self.key_type.load(k, *args, **kwargs)
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
if k is MISSING:
continue
value_type = self.value_types.get(k)
if value_type is None:
continue
try:
loaded = value_type.load(v, *args, **kwargs)
if loaded is not MISSING:
result[k] = loaded
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
for k, value_type in iteritems(self.value_types):
if k in data:
continue
try:
loaded = value_type.load(MISSING, *args, **kwargs)
if loaded is not MISSING:
result[k] = loaded
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
errors_builder.raise_errors()
return super(Dict, self).load(result, *args, **kwargs)
def dump(self, value, *args, **kwargs):
if value is MISSING or value is None:
self._fail('required')
if not is_mapping(value):
self._fail('invalid', data=value)
errors_builder = ValidationErrorBuilder()
result = {}
for k, v in iteritems(value):
value_type = self.value_types.get(k)
if value_type is None:
continue
try:
k = self.key_type.dump(k, *args, **kwargs)
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
if k is MISSING:
continue
try:
dumped = value_type.dump(v, *args, **kwargs)
if dumped is not MISSING:
result[k] = dumped
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
for k, value_type in iteritems(self.value_types):
if k in result:
continue
try:
dumped = value_type.dump(value.get(k, MISSING), *args, **kwargs)
if dumped is not MISSING:
result[k] = dumped
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
errors_builder.raise_errors()
return super(Dict, self).dump(result, *args, **kwargs)
def __repr__(self):
return '<{klass} of {value_types}>'.format(
klass=self.__class__.__name__,
value_types=repr(self.value_types),
)
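# Usage sketch: with a mapping of value types, keys that are not listed are
# silently dropped on load; key_type can transform/validate dictionary keys.
#
#     Dict({'foo': String()}).load({'foo': 'hi', 'bar': 1})      # => {'foo': 'hi'}
#     Dict(Integer(), key_type=String()).load({'a': 1, 'b': 2})  # => {'a': 1, 'b': 2}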
class Constant(Type):
"""Type that always serializes to given value and
checks this value on deserialize.
:param value: Value constant for this field.
:param Type field_type: Field type.
Error message keys:
* required
* value - incorrect value. Interpolation keys:
* expected_value - expected value
* actual_value - actual value
"""
default_error_messages = {
'required': 'Value is required',
'value': 'Value is incorrect',
}
def __init__(self, value, field_type=Any(), *args, **kwargs):
super(Constant, self).__init__(*args, **kwargs)
self.value = value
self.field_type = field_type
def load(self, data, *args, **kwargs):
value = self.field_type.load(data)
if value is MISSING or value is None:
self._fail('required')
if value != self.value:
self._fail('value', expected_value=self.value, actual_value=value)
return value
def dump(self, value, *args, **kwargs):
return self.field_type.dump(self.value, *args, **kwargs)
def __repr__(self):
return '<{klass} {value} of type {field_type}>'.format(
klass=self.__class__.__name__,
value=repr(self.value),
field_type=repr(self.field_type),
)
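# Usage sketch: Constant always dumps the fixed value and checks it on load.
#
#     v = Constant('v1', String())
#     v.dump(None)    # => 'v1' (the passed value is ignored)
#     v.load('v1')    # => 'v1'
#     v.load('v2')    # raises ValidationError ('Value is incorrect')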
class Field(ErrorMessagesMixin):
"""Base class for describing :class:`Object` fields. Defines a way to access
object fields during serialization/deserialization. Usually it extracts data to
serialize/deserialize and call `self.field_type.load()` to do data
transformation.
:param Type field_type: Field type.
"""
def __init__(self, field_type, *args, **kwargs):
super(Field, self).__init__(*args, **kwargs)
self.field_type = field_type
def get_value(self, name, obj, context=None):
"""Get value of field `name` from object `obj`.
:params str name: Field name.
:params obj: Object to get field value from.
:returns: Field value.
"""
        raise NotImplementedError()
def set_value(self, name, obj, value, context=None):
"""Set given value of field `name` to object `obj`.
:params str name: Field name.
:params obj: Object to get field value from.
:params value: Field value to set.
"""
        raise NotImplementedError()
def load(self, name, data, context=None):
"""Deserialize data from primitive types. Raises
:exc:`~lollipop.errors.ValidationError` if data is invalid.
:param str name: Name of attribute to deserialize.
:param data: Raw data to get value to deserialize from.
:param kwargs: Same keyword arguments as for :meth:`Type.load`.
:returns: Loaded data.
:raises: :exc:`~lollipop.errors.ValidationError`
"""
return self.field_type.load(data.get(name, MISSING), context=context)
def load_into(self, obj, name, data, inplace=True, context=None):
"""Deserialize data from primitive types updating existing object.
Raises :exc:`~lollipop.errors.ValidationError` if data is invalid.
:param obj: Object to update with deserialized data.
:param str name: Name of attribute to deserialize.
:param data: Raw data to get value to deserialize from.
:param bool inplace: If True update data inplace;
otherwise - create new data.
:param kwargs: Same keyword arguments as for :meth:`load`.
:returns: Loaded data.
:raises: :exc:`~lollipop.errors.ValidationError`
"""
if obj is None:
raise ValueError('Load target should not be None')
value = data.get(name, MISSING)
if value is MISSING:
return
target = self.get_value(name, obj, context=context)
if target is not None and target is not MISSING \
and hasattr(self.field_type, 'load_into'):
return self.field_type.load_into(target, value, inplace=inplace,
context=context)
else:
return self.field_type.load(value, context=context)
def dump(self, name, obj, context=None):
"""Serialize data to primitive types. Raises
:exc:`~lollipop.errors.ValidationError` if data is invalid.
:param str name: Name of attribute to serialize.
:param obj: Application object to extract serialized value from.
:returns: Serialized data.
:raises: :exc:`~lollipop.errors.ValidationError`
"""
value = self.get_value(name, obj, context=context)
return self.field_type.dump(value, context=context)
def __repr__(self):
return '<{klass} {field_type}>'.format(
klass=self.__class__.__name__,
field_type=repr(self.field_type),
)
class AttributeField(Field):
"""Field that corresponds to object attribute.
Subclasses can use `name_to_attribute` field to convert field names to
attribute names.
:param Type field_type: Field type.
:param attribute: Can be either string or callable. If string, use given
attribute name instead of field name defined in object type.
If callable, should take a single argument - name of field - and
return name of corresponding object attribute to obtain value from.
"""
def __init__(self, field_type, attribute=None, *args, **kwargs):
super(AttributeField, self).__init__(field_type, *args, **kwargs)
if attribute is None:
attribute = identity
elif not callable(attribute):
attribute = constant(attribute)
self.name_to_attribute = attribute
def get_value(self, name, obj, context=None):
return getattr(obj, self.name_to_attribute(name), MISSING)
def set_value(self, name, obj, value, context=None):
setattr(obj, self.name_to_attribute(name), value)
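# Usage sketch: AttributeField reads/writes an object attribute, optionally under
# a different name than the schema field. Here `some_obj` stands for any object
# with a `full_name` attribute (illustrative only).
#
#     field = AttributeField(String(), attribute='full_name')
#     field.get_value('name', some_obj)   # => getattr(some_obj, 'full_name', MISSING)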
class IndexField(Field):
"""Field that corresponds to object value at a particular index
(e.g. key of a dictionary).
Subclasses can use `name_to_key` field to convert field names to index keys.
:param Type field_type: Field type.
:param key: Can be either string or callable. If string, use given
key instead of field name defined in object type.
If callable, should take a single argument - name of field - and
return name of corresponding object key to obtain value from.
"""
def __init__(self, field_type, key=None, *args, **kwargs):
super(IndexField, self).__init__(field_type, *args, **kwargs)
if key is None:
key = identity
elif not callable(key):
key = constant(key)
self.name_to_key = key
def get_value(self, name, obj, context=None):
try:
return obj[self.name_to_key(name)]
except KeyError:
return MISSING
def set_value(self, name, obj, value, context=None):
obj[self.name_to_key(name)] = value
class MethodField(Field):
"""Field that is result of method invocation.
Example: ::
class Person(object):
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
def get_name(self):
return self.first_name + ' ' + self.last_name
PersonType = Object({
'name': MethodField(String(), 'get_name'),
}, constructor=Person)
:param Type field_type: Field type.
:param get: Can be either string or callable. If string, use target object
        method with given name to obtain the value.
If callable, should take field name and return name of object
method to use.
        Referenced method should take no arguments.
:param set: Can be either string or callable. If string, use target object
method with given name to set value in object.
If callable, should take field name and return name of object
method to use.
Referenced method should take 1 argument - new field value to set.
:param kwargs: Same keyword arguments as for :class:`Field`.
"""
def __init__(self, field_type, get=None, set=None, *args, **kwargs):
super(MethodField, self).__init__(field_type, *args, **kwargs)
if get is not None:
if not callable(get):
get = constant(get)
if set is not None:
if not callable(set):
set = constant(set)
self.get_method = get
self.set_method = set
def get_value(self, name, obj, context=None):
if not self.get_method:
return MISSING
method_name = self.get_method(name)
if not hasattr(obj, method_name):
raise ValueError('Object does not have method %s' % method_name)
method = getattr(obj, method_name)
if not callable(method):
raise ValueError('Value of %s is not callable' % method_name)
return make_context_aware(method, 0)(context)
def set_value(self, name, obj, value, context=None):
if not self.set_method:
return MISSING
method_name = self.set_method(name)
if not hasattr(obj, method_name):
raise ValueError('Object does not have method %s' % method_name)
method = getattr(obj, method_name)
if not callable(method):
raise ValueError('Value of %s is not callable' % method_name)
return make_context_aware(method, 1)(value, context)
class FunctionField(Field):
"""Field that is result of function invocation.
Example: ::
class Person(object):
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
def get_name(person):
return person.first_name + ' ' + person.last_name
PersonType = Object({
'name': FunctionField(String(), get_name),
}, constructor=Person)
:param Type field_type: Field type.
:param callable get: Function that takes source object and returns
field value.
:param callable set: Function that takes source object and new field value
and sets that value to object field. Function return value is ignored.
"""
def __init__(self, field_type, get=None, set=None, *args, **kwargs):
super(FunctionField, self).__init__(field_type, *args, **kwargs)
if get is not None and not callable(get):
raise ValueError("Get function is not callable")
if set is not None and not callable(set):
raise ValueError("Set function is not callable")
if get is not None:
get = make_context_aware(get, 1)
if set is not None:
set = make_context_aware(set, 2)
self.get_func = get
self.set_func = set
def get_value(self, name, obj, context=None):
if self.get_func is None:
return MISSING
return self.get_func(obj, context)
def set_value(self, name, obj, value, context=None):
if self.set_func is None:
return MISSING
self.set_func(obj, value, context)
def inheritable_property(name):
cache_attr = '__' + name
@property
def getter(self):
if not hasattr(self, cache_attr):
value = getattr(self, '_' + name)
if value is None:
for base in self.bases:
value = getattr(base, name)
if value is not None:
break
else:
value = None
setattr(self, cache_attr, value)
return getattr(self, cache_attr)
return getter
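# Illustrative note: inheritable_property('foo') builds a read-only property that
# returns self._foo when it is not None, otherwise the first non-None 'foo' value
# found on self.bases, caching the result on the instance.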
class Object(Type):
"""An object type. Serializes to a dict of field names to serialized field
values. Parametrized with field names to types mapping.
The way values are obtained during serialization is determined by type of
field object in :attr:`~Object.fields` mapping (see :class:`AttributeField`,
:class:`MethodField` or :class:`FunctionField` for details). You can specify
either :class:`Field` object, a :class:`Type` object or any other value.
In case of :class:`Type`, it will be automatically wrapped with a default
field type, which is controlled by :attr:`~Object.default_field_type`
constructor argument.
In case of any other value it will be transformed into :class:`Constant`.
Example: ::
class Person(object):
def __init__(self, name, age):
self.name = name
self.age = age
PersonType = Object({
'name': String(),
'age': Integer(),
}, constructor=Person)
PersonType.load({'name': 'John', 'age': 42})
# => Person(name='John', age=42)
:param base_or_fields: Either :class:`Object` instance or fields (See
`fields` argument). In case of fields, the actual fields argument should
not be specified.
:param fields: List of name-to-value tuples or mapping of object field names to
:class:`Type`, :class:`Field` objects or constant values.
    :param callable constructor: Deserialized value constructor. Constructor
should take all fields values as keyword arguments.
:param Field default_field_type: Default field type to use for fields defined
by their type.
:param allow_extra_fields: If False, it will raise
:exc:`~lollipop.errors.ValidationError` for all extra dict keys during
deserialization. If True, will ignore all extra fields. If :class:`Type` or
:class:`Field`, extra fields will be loaded and validated with given
type/field and stored in load result.
:param only: Field name or list of field names to include in this object
        from its base classes. All other base classes' fields won't be used.
Does not affect own fields.
:param exclude: Field name or list of field names to exclude from this
object from base classes. All other base classes' fields will be included.
Does not affect own fields.
:param bool ordered: Serialize data into OrderedDict following fields order.
Fields in this case should be declared with a dictionary which also
supports ordering or with a list of tuples.
:param bool immutable: If False, object is allowed to be modified in-place;
if True - always create a copy with `constructor`.
:param kwargs: Same keyword arguments as for :class:`Type`.
Error message keys:
* required - value is required
* invalid - invalid value type. Interpolation data:
* data - actual value
* unknown - reported for unknown fields
"""
default_error_messages = {
'invalid': 'Value should be dict',
'unknown': 'Unknown field',
}
def __init__(self, bases_or_fields=None, fields=None, constructor=None,
default_field_type=None,
allow_extra_fields=None, only=None, exclude=None,
immutable=None, ordered=None,
**kwargs):
super(Object, self).__init__(**kwargs)
if bases_or_fields is None and fields is None:
raise ValueError('No base and/or fields are specified')
if isinstance(bases_or_fields, Type):
bases = [bases_or_fields]
if is_sequence(bases_or_fields) and \
all([isinstance(base, Type) for base in bases_or_fields]):
bases = bases_or_fields
elif is_sequence(bases_or_fields) or is_mapping(bases_or_fields):
if fields is None:
bases = []
fields = bases_or_fields
else:
raise ValueError('Unknown base object type: %r' % bases_or_fields)
self.bases = bases
self._default_field_type = default_field_type
self._constructor = constructor
if isinstance(allow_extra_fields, Type):
allow_extra_fields = \
(self.default_field_type or AttributeField)(allow_extra_fields)
self._allow_extra_fields = allow_extra_fields
self._immutable = immutable
self._ordered = ordered
if only is not None and not is_sequence(only):
only = [only]
if exclude is not None and not is_sequence(exclude):
exclude = [exclude]
self._only = only
self._exclude = exclude
self._fields = fields
@property
def fields(self):
if not hasattr(self, '_resolved_fields'):
self._resolved_fields = self._resolve_fields(self.bases, self._fields,
self._only, self._exclude)
return self._resolved_fields
default_field_type = inheritable_property('default_field_type')
constructor = inheritable_property('constructor')
allow_extra_fields = inheritable_property('allow_extra_fields')
immutable = inheritable_property('immutable')
ordered = inheritable_property('ordered')
def _normalize_field(self, value):
if isinstance(value, Field):
return value
if not isinstance(value, Type):
value = Constant(value)
return (self.default_field_type or AttributeField)(value)
def _resolve_fields(self, bases, fields, only=None, exclude=None):
all_fields = []
if bases is not None:
for base in bases:
all_fields += list(iteritems(base.fields))
if only is not None:
all_fields = [(name, field)
for name, field in all_fields
if name in only]
if exclude is not None:
all_fields = [(name, field)
for name, field in all_fields
if name not in exclude]
if fields is not None:
all_fields += [
(name, self._normalize_field(field))
for name, field in (iteritems(fields)
if is_mapping(fields) else fields)
]
return OrderedDict(all_fields)
def load(self, data, *args, **kwargs):
if data is MISSING or data is None:
self._fail('required')
if not is_mapping(data):
self._fail('invalid', data=data)
errors_builder = ValidationErrorBuilder()
result = {}
for name, field in iteritems(self.fields):
try:
loaded = field.load(name, data, *args, **kwargs)
if loaded != MISSING:
result[name] = loaded
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
if self.allow_extra_fields is False:
field_names = [name for name, _ in iteritems(self.fields)]
for name in data:
if name not in field_names:
errors_builder.add_error(name, self._error_messages['unknown'])
elif isinstance(self.allow_extra_fields, Field):
field_names = [name for name, _ in iteritems(self.fields)]
for name in data:
if name not in field_names:
try:
loaded = self.allow_extra_fields.load(
name, data, *args, **kwargs
)
if loaded != MISSING:
result[name] = loaded
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
errors_builder.raise_errors()
result = super(Object, self).load(result, *args, **kwargs)
result = self.constructor(**result) \
if self.constructor else OpenStruct(result)
return result
def load_into(self, obj, data, inplace=True, *args, **kwargs):
"""Load data and update existing object.
:param obj: Object to update with deserialized data.
:param data: Raw data to get value to deserialize from.
:param bool inplace: If True update data inplace;
otherwise - create new data.
:param kwargs: Same keyword arguments as for :meth:`Type.load`.
:returns: Updated object.
:raises: :exc:`~lollipop.errors.ValidationError`
"""
if obj is None:
raise ValueError('Load target should not be None')
if data is MISSING:
return
if data is None:
self._fail('required')
if not is_mapping(data):
self._fail('invalid', data=data)
errors_builder = ValidationErrorBuilder()
data1 = {}
for name, field in iteritems(self.fields):
try:
if name in data:
# Load new data
value = field.load_into(obj, name, data,
inplace=not self.immutable and inplace,
*args, **kwargs)
else:
                    # Retrieve data from existing object
value = field.load(name, {
name: field.dump(name, obj, *args, **kwargs)
})
if value is not MISSING:
data1[name] = value
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
if self.allow_extra_fields is False:
field_names = [name for name, _ in iteritems(self.fields)]
for name in data:
if name not in field_names:
errors_builder.add_error(name, self._error_messages['unknown'])
elif isinstance(self.allow_extra_fields, Field):
field_names = [name for name, _ in iteritems(self.fields)]
for name in data:
if name not in field_names:
try:
loaded = self.allow_extra_fields.load_into(
obj, name, data,
inplace=not self.immutable and inplace,
*args, **kwargs
)
if loaded != MISSING:
data1[name] = loaded
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
errors_builder.raise_errors()
data2 = super(Object, self).load(data1, *args, **kwargs)
if self.immutable or not inplace:
result = data2
if self.constructor:
result = self.constructor(**result)
else:
for name, value in iteritems(data2):
field = self.fields.get(name, self.allow_extra_fields)
if not isinstance(field, Field):
continue
field.set_value(name, obj, value, *args, **kwargs)
result = obj
return result
def validate_for(self, obj, data, *args, **kwargs):
"""Takes target object and serialized data, tries to update that object
with data and validate result. Returns validation errors or None.
Object is not updated.
:param obj: Object to check data validity against. In case the data is
partial object is used to get the rest of data from.
:param data: Data to validate. Can be partial (not all schema field data
is present).
:param kwargs: Same keyword arguments as for :meth:`Type.load`.
:returns: validation errors or None
"""
try:
self.load_into(obj, data, inplace=False, *args, **kwargs)
return None
except ValidationError as ve:
return ve.messages
def dump(self, obj, *args, **kwargs):
if obj is MISSING or obj is None:
self._fail('required')
errors_builder = ValidationErrorBuilder()
result = OrderedDict() if self.ordered else {}
for name, field in iteritems(self.fields):
try:
dumped = field.dump(name, obj, *args, **kwargs)
if dumped != MISSING:
result[name] = dumped
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
errors_builder.raise_errors()
return super(Object, self).dump(result, *args, **kwargs)
def __repr__(self):
return '<{klass}{fields}>'.format(
klass=self.__class__.__name__,
fields=''.join([' %s=%s' % (name, field_type.field_type)
for name, field_type in iteritems(self.fields)]),
)
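# Usage sketch (building on the Person/PersonType example from the docstring above):
#
#     john = PersonType.load({'name': 'John', 'age': 42})
#     PersonType.dump(john)                           # => {'age': 42, 'name': 'John'}
#     PersonType.validate_for(john, {'age': 'oops'})  # => {'age': 'Value should be integer'}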
class Modifier(Type):
"""Base class for modifiers - a wrapper for types that modify
how those types work. Also, it tries to be as transparent as possible
in regard to inner type, so it proxies all unknown attributes to inner type.
:param Type inner_type: Actual type that should be optional.
"""
def __init__(self, inner_type, **kwargs):
super(Modifier, self).__init__(
**dict({'name': inner_type.name,
'description': inner_type.description},
**kwargs)
)
self.inner_type = inner_type
def __hasattr__(self, name):
return hasattr(self.inner_type, name)
def __getattr__(self, name):
return getattr(self.inner_type, name)
def __repr__(self):
return '<{klass} {inner_type}>'.format(
klass=self.__class__.__name__,
inner_type=repr(self.inner_type),
)
class Optional(Modifier):
"""A modifier which makes values optional: if value is missing or None,
it will not transform it with an inner type but instead will return None
(or any other configured value).
Example: ::
UserType = Object({
'email': String(), # by default types require valid values
'name': Optional(String()), # value can be omitted or None
'role': Optional( # when value is omitted or None, use given value
String(validate=AnyOf(['admin', 'customer'])),
load_default='customer',
),
})
:param Type inner_type: Actual type that should be optional.
:param load_default: Value or callable. If value - it will be used when value
is missing on deserialization. If callable - it will be called with no
arguments to get value to use when value is missing on deserialization.
:param dump_default: Value or callable. If value - it will be used when value
is missing on serialization. If callable - it will be called with no
arguments to get value to use when value is missing on serialization.
:param kwargs: Same keyword arguments as for :class:`Type`.
"""
def __init__(self, inner_type,
load_default=None, dump_default=None,
**kwargs):
super(Optional, self).__init__(inner_type, **kwargs)
if not callable(load_default):
load_default = constant(load_default)
if not callable(dump_default):
dump_default = constant(dump_default)
self.load_default = make_context_aware(load_default, 0)
self.dump_default = make_context_aware(dump_default, 0)
def load(self, data, context=None, *args, **kwargs):
if data is MISSING or data is None:
return self.load_default(context)
return super(Optional, self).load(
self.inner_type.load(data, context=context, *args, **kwargs),
*args, **kwargs
)
def dump(self, data, context=None, *args, **kwargs):
if data is MISSING or data is None:
return self.dump_default(context)
return super(Optional, self).dump(
self.inner_type.dump(data, context=context, *args, **kwargs),
*args, **kwargs
)
def __repr__(self):
return '<{klass} {inner_type}>'.format(
klass=self.__class__.__name__,
inner_type=repr(self.inner_type),
)
class LoadOnly(Modifier):
"""A wrapper type which proxies loading to inner type but always returns
:obj:`MISSING` on dump.
Example: ::
UserType = Object({
'name': String(),
'password': LoadOnly(String()),
})
:param Type inner_type: Data type.
"""
def load(self, data, *args, **kwargs):
return self.inner_type.load(data, *args, **kwargs)
def dump(self, data, *args, **kwargs):
return MISSING
class DumpOnly(Modifier):
"""A wrapper type which proxies dumping to inner type but always returns
:obj:`MISSING` on load.
Example: ::
UserType = Object({
'name': String(),
'created_at': DumpOnly(DateTime()),
})
:param Type inner_type: Data type.
"""
def load(self, data, *args, **kwargs):
return MISSING
def dump(self, data, *args, **kwargs):
return self.inner_type.dump(data, *args, **kwargs)
class Transform(Modifier):
"""A wrapper type which allows us to convert data structures to an inner type,
then loaded or dumped with a customized format.
Example: ::
Point = namedtuple('Point', ['x', 'y'])
PointType = Transform(
Tuple(Integer(), Integer()),
post_load=lambda values: Point(values[0], values[1]),
pre_dump=lambda point: [point.x, point.y],
)
PointType.dump((Point(x=1, y=2)))
# => [1,2]
PointType.load([1,2])
# => Point(x=1, y=2)
:param Type inner_type: Data type.
:param pre_load: Modify data before it is passed to inner_type load. Argument
should be a callable taking one argument - data - and returning updated data.
Optionally it can take a second argument - context.
:param post_load: Modify data after it is returned from inner_type load.
Argument should be a callable taking one argument - data - and returning
updated data. Optionally it can take a second argument - context.
:param pre_dump: Modify value before it passed to inner_type dump. Argument
should be a callable taking one argument - value - and returning updated value.
Optionally it can take a second argument - context.
:param post_dump: Modify value after it is returned from inner_type dump.
Argument should be a callable taking one argument - value - and returning
updated value. Optionally it can take a second argument - context.
"""
def __init__(self, inner_type,
pre_load=identity, post_load=identity,
pre_dump=identity, post_dump=identity):
super(Transform, self).__init__(inner_type)
self.pre_load = make_context_aware(pre_load, 1)
self.post_load = make_context_aware(post_load, 1)
self.pre_dump = make_context_aware(pre_dump, 1)
self.post_dump = make_context_aware(post_dump, 1)
def load(self, data, context=None):
return self.post_load(
self.inner_type.load(
self.pre_load(data, context),
context,
),
context,
)
def dump(self, value, context=None):
return self.post_dump(
self.inner_type.dump(
self.pre_dump(value, context),
context,
),
context,
)
def validated_type(base_type, name=None, validate=None):
"""Convenient way to create a new type by adding validation to existing type.
Example: ::
Ipv4Address = validated_type(
String, 'Ipv4Address',
# regexp simplified for demo purposes
Regexp('^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address')
)
Percentage = validated_type(Integer, validate=Range(0, 100))
# The above is the same as
class Ipv4Address(String):
def __init__(self, *args, **kwargs):
super(Ipv4Address, self).__init__(*args, **kwargs)
self.validators.insert(0, Regexp('^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address'))
class Percentage(Integer):
def __init__(self, *args, **kwargs):
super(Percentage, self).__init__(*args, **kwargs)
self.validators.insert(0, Range(0, 100))
:param Type base_type: Base type for a new type.
:param name str: Optional class name for new type
(will be shown in places like repr).
:param validate: A validator or list of validators for this data type.
See `Type.validate` for details.
"""
if validate is None:
validate = []
if not is_sequence(validate):
validate = [validate]
class ValidatedSubtype(base_type):
if name is not None:
__name__ = name
def __init__(self, *args, **kwargs):
super(ValidatedSubtype, self).__init__(*args, **kwargs)
for validator in reversed(validate):
self.validators.insert(0, validator)
return ValidatedSubtype
| {
"content_hash": "2988ce62d6eea152ff935fb851dede31",
"timestamp": "",
"source": "github",
"line_count": 1814,
"max_line_length": 101,
"avg_line_length": 34.65821389195149,
"alnum_prop": 0.577636392556068,
"repo_name": "maximkulkin/lollipop",
"id": "b33c72aed7e209f8ace6f769f4c86bed904b22ed",
"size": "62870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lollipop/types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "240179"
}
],
"symlink_target": ""
} |
import json
import os
import platform
import shutil
import unittest
from unittest import mock
# pylint: disable=import-error
from opentelemetry.sdk import trace, resources
from opentelemetry.sdk.trace.export import SpanExportResult
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace import Link, SpanContext, SpanKind
from opentelemetry.trace.status import Status, StatusCode
from azure.monitor.opentelemetry.exporter.export._base import ExportResult
from azure.monitor.opentelemetry.exporter.export.trace._exporter import (
AzureMonitorTraceExporter,
_check_instrumentation_span,
_get_trace_export_result,
)
from azure.monitor.opentelemetry.exporter._utils import azure_monitor_context
def throw(exc_type, *args, **kwargs):
def func(*_args, **_kwargs):
raise exc_type(*args, **kwargs)
return func
# pylint: disable=import-error
# pylint: disable=protected-access
# pylint: disable=too-many-lines
class TestAzureTraceExporter(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ.clear()
os.environ[
"APPINSIGHTS_INSTRUMENTATIONKEY"
] = "1234abcd-5678-4efa-8abc-1234567890ab"
os.environ["APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"] = "true"
cls._exporter = AzureMonitorTraceExporter()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls._exporter.storage._path, True)
def test_constructor(self):
"""Test the constructor."""
exporter = AzureMonitorTraceExporter(
connection_string="InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab",
)
self.assertEqual(
exporter._instrumentation_key,
"4321abcd-5678-4efa-8abc-1234567890ab",
)
def test_from_connection_string(self):
exporter = AzureMonitorTraceExporter.from_connection_string(
"InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab"
)
self.assertTrue(isinstance(exporter, AzureMonitorTraceExporter))
self.assertEqual(
exporter._instrumentation_key,
"4321abcd-5678-4efa-8abc-1234567890ab",
)
def test_export_empty(self):
exporter = self._exporter
result = exporter.export([])
self.assertEqual(result, SpanExportResult.SUCCESS)
def test_export_failure(self):
exporter = self._exporter
with mock.patch(
"azure.monitor.opentelemetry.exporter.AzureMonitorTraceExporter._transmit"
) as transmit: # noqa: E501
test_span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557338,
is_remote=False,
),
)
test_span.start()
test_span.end()
transmit.return_value = ExportResult.FAILED_RETRYABLE
storage_mock = mock.Mock()
exporter.storage.put = storage_mock
result = exporter.export([test_span])
self.assertEqual(result, SpanExportResult.FAILURE)
self.assertEqual(storage_mock.call_count, 1)
def test_export_success(self):
exporter = self._exporter
test_span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557338,
is_remote=False,
),
)
test_span.start()
test_span.end()
with mock.patch(
"azure.monitor.opentelemetry.exporter.AzureMonitorTraceExporter._transmit"
) as transmit: # noqa: E501
transmit.return_value = ExportResult.SUCCESS
storage_mock = mock.Mock()
exporter._transmit_from_storage = storage_mock
result = exporter.export([test_span])
self.assertEqual(result, SpanExportResult.SUCCESS)
self.assertEqual(storage_mock.call_count, 1)
@mock.patch("azure.monitor.opentelemetry.exporter.export.trace._exporter._logger")
def test_export_exception(self, logger_mock):
test_span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557338,
is_remote=False,
),
)
test_span.start()
test_span.end()
exporter = self._exporter
with mock.patch(
"azure.monitor.opentelemetry.exporter.AzureMonitorTraceExporter._transmit",
throw(Exception),
): # noqa: E501
result = exporter.export([test_span])
self.assertEqual(result, SpanExportResult.FAILURE)
self.assertEqual(logger_mock.exception.called, True)
def test_export_not_retryable(self):
exporter = self._exporter
test_span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557338,
is_remote=False,
),
)
test_span.start()
test_span.end()
with mock.patch(
"azure.monitor.opentelemetry.exporter.AzureMonitorTraceExporter._transmit"
) as transmit: # noqa: E501
transmit.return_value = ExportResult.FAILED_NOT_RETRYABLE
result = exporter.export([test_span])
self.assertEqual(result, SpanExportResult.FAILURE)
def test_span_to_envelope_partA(self):
exporter = self._exporter
resource = resources.Resource(
{"service.name": "testServiceName",
"service.namespace": "testServiceNamespace",
"service.instance.id": "testServiceInstanceId"})
context = SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557338,
is_remote=False,
)
test_span = trace._Span(
name="test",
context=context,
resource=resource,
attributes={"enduser.id":"testId"},
parent=context,
)
test_span.start()
test_span.end()
envelope = exporter._span_to_envelope(test_span)
self.assertEqual(envelope.instrumentation_key,
"1234abcd-5678-4efa-8abc-1234567890ab")
self.assertIsNotNone(envelope.tags)
self.assertEqual(envelope.tags.get("ai.device.id"), azure_monitor_context["ai.device.id"])
self.assertEqual(envelope.tags.get("ai.device.locale"), azure_monitor_context["ai.device.locale"])
self.assertEqual(envelope.tags.get("ai.device.osVersion"), azure_monitor_context["ai.device.osVersion"])
self.assertEqual(envelope.tags.get("ai.device.type"), azure_monitor_context["ai.device.type"])
self.assertEqual(envelope.tags.get("ai.internal.sdkVersion"), azure_monitor_context["ai.internal.sdkVersion"])
self.assertEqual(envelope.tags.get("ai.cloud.role"), "testServiceNamespace.testServiceName")
self.assertEqual(envelope.tags.get("ai.cloud.roleInstance"), "testServiceInstanceId")
self.assertEqual(envelope.tags.get("ai.internal.nodeName"), "testServiceInstanceId")
self.assertEqual(envelope.tags.get("ai.operation.id"), "{:032x}".format(context.trace_id))
self.assertEqual(envelope.tags.get("ai.user.id"), "testId")
self.assertEqual(envelope.tags.get("ai.operation.parentId"), "{:016x}".format(context.span_id))
def test_span_to_envelope_partA_default(self):
exporter = self._exporter
resource = resources.Resource(
{"service.name": "testServiceName"})
context = SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557338,
is_remote=False,
)
test_span = trace._Span(
name="test",
context=context,
resource=resource,
)
test_span.start()
test_span.end()
envelope = exporter._span_to_envelope(test_span)
self.assertEqual(envelope.tags.get("ai.cloud.role"), "testServiceName")
self.assertEqual(envelope.tags.get("ai.cloud.roleInstance"), platform.node())
self.assertEqual(envelope.tags.get("ai.internal.nodeName"), envelope.tags.get("ai.cloud.roleInstance"))
def test_span_to_envelope_client_http(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.CLIENT HTTP
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"http.method": "GET",
"http.url": "https://www.wikipedia.org/wiki/Rabbit",
"http.status_code": 200,
"peer.service": "service",
"http.user_agent": "agent",
},
kind=SpanKind.CLIENT,
)
span.start(start_time=start_time)
span.end(end_time=end_time)
span._status = Status(status_code=StatusCode.OK)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.RemoteDependency"
)
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(envelope.data.base_data.name, "GET /wiki/Rabbit")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
self.assertEqual(envelope.data.base_data.type, "HTTP")
self.assertEqual(envelope.data.base_data.target, "service")
self.assertEqual(
envelope.data.base_data.data,
"https://www.wikipedia.org/wiki/Rabbit",
)
self.assertEqual(envelope.data.base_data.result_code, "200")
self.assertEqual(envelope.tags["ai.user.userAgent"], "agent")
self.assertEqual(len(envelope.data.base_data.properties), 0)
# Name empty
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"http.url": "https://www.example.com",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.name, "GET /")
# Target
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"http.url": "https://www.example.com:1234",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "www.example.com:1234")
span._attributes = {
"http.method": "GET",
"http.scheme": "http",
"http.url": "http://www.example.com:80",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "www.example.com")
span._attributes = {
"http.method": "GET",
"http.scheme": "http",
"http.url": "http://www.example.com:80",
"http.host": "www.wikipedia.org:1234",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "www.wikipedia.org:1234")
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"http.url": "http://www.example.com:80",
"http.host": "www.wikipedia.org:443",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "www.wikipedia.org")
# url
# spell-checker:ignore ddds
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"http.host": "www.wikipedia.org",
"http.target": "/path/12314/?q=ddds#123"
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.data, "https://www.wikipedia.org/path/12314/?q=ddds#123")
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"net.peer.port": "8080",
"net.peer.name": "example.com",
"http.target": "/path/12314/?q=ddds#123"
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.data, "https://example.com:8080/path/12314/?q=ddds#123")
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"net.peer.port": "8080",
"net.peer.ip": "192.168.0.1",
"http.target": "/path/12314/?q=ddds#123"
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.data, "https://192.168.0.1:8080/path/12314/?q=ddds#123")
def test_span_to_envelope_client_db(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.CLIENT Db
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"db.system": "db2 system",
"peer.service": "service",
"db.statement": "SELECT * from test",
},
kind=SpanKind.CLIENT,
)
span.start(start_time=start_time)
span.end(end_time=end_time)
span._status = Status(status_code=StatusCode.OK)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.RemoteDependency"
)
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(envelope.data.base_data.name, "test")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
self.assertEqual(envelope.data.base_data.type, "db2 system")
self.assertEqual(envelope.data.base_data.target, "service")
self.assertEqual(envelope.data.base_data.data, "SELECT * from test")
self.assertEqual(envelope.data.base_data.result_code, "0")
self.assertEqual(len(envelope.data.base_data.properties), 0)
# data
span._attributes = {
"db.system": "postgresql",
"peer.service": "service",
"db.operation": "SELECT",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.data, "SELECT")
# Target
span._attributes = {
"db.system": "postgresql",
"db.statement": "SELECT",
"db.name": "testDb",
"peer.service": "service",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "service|testDb")
span._attributes = {
"db.system": "postgresql",
"db.statement": "SELECT",
"db.name": "testDb",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "testDb")
span._attributes = {
"db.system": "postgresql",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "postgresql")
# Type
span._attributes = {
"db.system": "mssql",
"db.statement": "SELECT",
"db.name": "testDb",
"peer.service": "service",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.type, "SQL")
span._attributes = {
"db.system": "mysql",
"db.statement": "SELECT",
"db.name": "testDb",
"peer.service": "service",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.type, "mysql")
span._attributes = {
"db.system": "postgresql",
"db.statement": "SELECT",
"db.name": "testDb",
"peer.service": "service",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.type, "postgresql")
span._attributes = {
"db.system": "mongodb",
"db.statement": "SELECT",
"db.name": "testDb",
"peer.service": "service",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.type, "mongodb")
span._attributes = {
"db.system": "redis",
"db.statement": "SELECT",
"db.name": "testDb",
"peer.service": "service",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.type, "redis")
def test_span_to_envelope_client_rpc(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.CLIENT rpc
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"peer.service": "service",
"rpc.system": "rpc",
"rpc.service": "Test service",
},
kind=SpanKind.CLIENT,
)
span.start(start_time=start_time)
span.end(end_time=end_time)
span._status = Status(status_code=StatusCode.OK)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.RemoteDependency"
)
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(envelope.data.base_data.name, "test")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_data.result_code, "0")
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
self.assertEqual(envelope.data.base_data.type, "rpc.system")
self.assertEqual(envelope.data.base_data.target, "service")
self.assertEqual(len(envelope.data.base_data.properties), 0)
# target
span._attributes = {
"rpc.system": "rpc",
"rpc.service": "Test service",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "rpc")
def test_span_to_envelope_client_messaging(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.CLIENT messaging
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"messaging.system": "messaging",
"messaging.destination": "celery",
},
kind=SpanKind.CLIENT,
)
span.start(start_time=start_time)
span.end(end_time=end_time)
span._status = Status(status_code=StatusCode.OK)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.RemoteDependency"
)
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(envelope.data.base_data.name, "test")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_data.result_code, "0")
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
self.assertEqual(envelope.data.base_data.type, "messaging")
self.assertEqual(envelope.data.base_data.target, "celery")
self.assertEqual(len(envelope.data.base_data.properties), 0)
# target
span._attributes = {
"messaging.system": "messaging",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "messaging")
def test_span_to_envelope_client_azure(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.CLIENT messaging
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"az.namespace": "Microsoft.EventHub",
"peer.address": "test_address",
"message_bus.destination": "test_destination",
},
kind=SpanKind.CLIENT,
)
span.start(start_time=start_time)
span.end(end_time=end_time)
span._status = Status(status_code=StatusCode.OK)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.RemoteDependency"
)
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(envelope.data.base_data.name, "test")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_data.result_code, "0")
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
self.assertEqual(envelope.data.base_data.type, "Microsoft.EventHub")
self.assertEqual(envelope.data.base_data.target, "test_address/test_destination")
self.assertEqual(len(envelope.data.base_data.properties), 2)
# target
span._attributes = {
"messaging.system": "messaging",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "messaging")
def test_span_to_envelope_producer_messaging(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.PRODUCER messaging
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"messaging.system": "messaging",
"messaging.destination": "celery",
},
kind=SpanKind.PRODUCER,
)
span.start(start_time=start_time)
span.end(end_time=end_time)
span._status = Status(status_code=StatusCode.OK)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.RemoteDependency"
)
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(envelope.data.base_data.name, "test")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_data.result_code, "0")
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
self.assertEqual(envelope.data.base_data.type, "Queue Message | messaging")
self.assertEqual(envelope.data.base_data.target, "celery")
self.assertEqual(len(envelope.data.base_data.properties), 0)
# target
span._attributes = {
"messaging.system": "messaging",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "messaging")
# azure specific
# spell-checker:ignore myeventhub
span._attributes = {
"az.namespace": "Microsoft.EventHub",
"peer.address": "Test_peer",
"message_bus.destination": "/myeventhub",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.type, "Queue Message | Microsoft.EventHub")
self.assertEqual(envelope.data.base_data.target, "Test_peer//myeventhub")
self.assertEqual(len(envelope.data.base_data.properties), 2)
def test_span_to_envelope_internal(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.INTERNAL
context = SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
)
span = trace._Span(
name="test",
context=context,
parent=context,
attributes={},
kind=SpanKind.INTERNAL,
)
span.start(start_time=start_time)
span.end(end_time=end_time)
span._status = Status(status_code=StatusCode.OK)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.RemoteDependency"
)
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(envelope.data.base_data.name, "test")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
self.assertEqual(envelope.data.base_data.type, "InProc")
self.assertEqual(envelope.data.base_data.result_code, "0")
self.assertEqual(len(envelope.data.base_data.properties), 0)
# azure specific
span._attributes = {
"az.namespace": "Microsoft.EventHub",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.type, "InProc | Microsoft.EventHub")
self.assertEqual(len(envelope.data.base_data.properties), 1)
def test_span_envelope_request_azure(self):
exporter = self._exporter
start_time = 4000000000000000000
end_time = start_time + 1001000000
# SpanKind.SERVER/CONSUMER Azure specific
links = []
links.append(
Link(
context=SpanContext(
trace_id=36873507687745823477771305566750195432,
span_id=12030755672171557338,
is_remote=False,
),
attributes={
"enqueuedTime": 1000000000000
}
)
)
links.append(
Link(
context=SpanContext(
trace_id=36873507687745823477771305566750195432,
span_id=12030755672171557338,
is_remote=False,
),
attributes={
"enqueuedTime": 3000000000000
}
)
)
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"az.namespace": "Microsoft.EventHub",
"peer.address": "Test_peer",
"message_bus.destination": "/myeventhub",
},
kind=SpanKind.CONSUMER,
links=links,
)
span._status = Status(status_code=StatusCode.OK)
span.start(start_time=start_time)
span.end(end_time=end_time)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.Request"
)
self.assertEqual(envelope.tags["ai.operation.name"], "test")
self.assertEqual(envelope.data.base_type, "RequestData")
self.assertEqual(envelope.data.base_data.name, "test")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertEqual(envelope.data.base_data.response_code, "0")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_data.source, "Test_peer//myeventhub")
self.assertEqual(envelope.data.base_data.measurements["timeSinceEnqueued"], 2000000000000)
self.assertEqual(len(envelope.data.base_data.properties), 3)
# enqueued time
links = []
links.append(
Link(
context=SpanContext(
trace_id=36873507687745823477771305566750195432,
span_id=12030755672171557338,
is_remote=False,
),
attributes={
"enqueuedTime": 5000000000000
}
)
)
links.append(
Link(
context=SpanContext(
trace_id=36873507687745823477771305566750195432,
span_id=12030755672171557338,
is_remote=False,
),
attributes={
"enqueuedTime": 6000000000000
}
)
)
span._links = links
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.measurements["timeSinceEnqueued"], 0)
def test_span_envelope_server_http(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.SERVER HTTP
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"http.method": "GET",
"http.path": "/wiki/Rabbit",
"http.route": "/wiki/Rabbit",
"http.url": "https://www.wikipedia.org/wiki/Rabbit",
"http.status_code": 200,
"http.user_agent": "agent",
"http.client_ip": "client_ip",
},
kind=SpanKind.SERVER,
)
span._status = Status(status_code=StatusCode.OK)
span.start(start_time=start_time)
span.end(end_time=end_time)
envelope = exporter._span_to_envelope(span)
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.Request"
)
self.assertEqual(envelope.data.base_type, "RequestData")
self.assertEqual(envelope.data.base_data.name, "GET /wiki/Rabbit")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertEqual(envelope.data.base_data.response_code, "200")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.tags["ai.operation.name"], "GET /wiki/Rabbit")
self.assertEqual(envelope.tags["ai.user.userAgent"], "agent")
self.assertEqual(envelope.tags["ai.location.ip"], "client_ip")
self.assertEqual(envelope.data.base_data.url, "https://www.wikipedia.org/wiki/Rabbit")
self.assertEqual(len(envelope.data.base_data.properties), 0)
# location
span._attributes = {
"http.method": "GET",
"net.peer.ip": "peer_ip"
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.tags["ai.location.ip"], "peer_ip")
# url
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"http.target": "/path",
"http.host": "www.example.org",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.url, "https://www.example.org/path")
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"http.target": "/path",
"net.host.port": "35555",
"http.server_name": "example.com",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.url, "https://example.com:35555/path")
span._attributes = {
"http.method": "GET",
"http.scheme": "https",
"http.target": "/path",
"net.host.port": "35555",
"net.host.name": "localhost",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.url, "https://localhost:35555/path")
# ai.operation.name
span._attributes = {
"http.method": "GET",
"http.url": "https://www.wikipedia.org/wiki/Rabbit/test",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.tags["ai.operation.name"], "GET /wiki/Rabbit/test")
self.assertEqual(envelope.data.base_data.name, "GET /wiki/Rabbit/test")
span._attributes = {
"http.method": "GET",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.tags["ai.operation.name"], "test")
self.assertEqual(envelope.data.base_data.name, "test")
def test_span_envelope_server_messaging(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# SpanKind.SERVER messaging
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"messaging.system": "messaging",
"net.peer.name": "test name",
"net.peer.ip": "127.0.0.1",
"messaging.destination": "celery",
},
kind=SpanKind.SERVER,
)
span._status = Status(status_code=StatusCode.OK)
span.start(start_time=start_time)
span.end(end_time=end_time)
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.tags["ai.operation.name"], "test")
self.assertEqual(envelope.data.base_type, "RequestData")
self.assertEqual(envelope.data.base_data.name, "test")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.tags["ai.location.ip"], "127.0.0.1")
self.assertEqual(envelope.data.base_data.source, "test name/celery")
self.assertEqual(len(envelope.data.base_data.properties), 0)
# source
span._attributes = {
"messaging.system": "messaging",
"net.peer.ip": "127.0.0.1",
"messaging.destination": "celery",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.source, "127.0.0.1/celery")
span._attributes = {
"messaging.system": "messaging",
"messaging.destination": "celery",
}
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.source, "celery")
def test_span_to_envelope_success_error(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# Status/success error
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"test": "asd",
"http.method": "GET",
"http.url": "https://www.wikipedia.org/wiki/Rabbit",
"http.status_code": 200,
},
kind=SpanKind.CLIENT,
)
span._status = Status(status_code=StatusCode.ERROR)
span.start(start_time=start_time)
span.end(end_time=end_time)
envelope = exporter._span_to_envelope(span)
self.assertFalse(envelope.data.base_data.success)
def test_span_to_envelope_sample_rate(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"test": "asd",
"http.method": "GET",
"http.url": "https://www.wikipedia.org/wiki/Rabbit",
"http.status_code": 200,
"_MS.sampleRate": 50,
},
kind=SpanKind.CLIENT,
)
span._status = Status(status_code=StatusCode.OK)
span.start(start_time=start_time)
span.end(end_time=end_time)
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.sample_rate, 50)
self.assertIsNone(envelope.data.base_data.properties.get("_MS.sampleRate"))
def test_span_to_envelope_properties(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# Properties
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"test": "asd",
"http.method": "GET",
"http.url": "https://www.wikipedia.org/wiki/Rabbit",
"http.status_code": 200,
},
kind=SpanKind.CLIENT,
)
span._status = Status(status_code=StatusCode.OK)
span.start(start_time=start_time)
span.end(end_time=end_time)
envelope = exporter._span_to_envelope(span)
self.assertEqual(len(envelope.data.base_data.properties), 1)
self.assertEqual(envelope.data.base_data.properties["test"], "asd")
def test_span_to_envelope_properties_links(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
# Links
links = []
links.append(
Link(
context=SpanContext(
trace_id=36873507687745823477771305566750195432,
span_id=12030755672171557338,
is_remote=False,
)
)
)
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"http.method": "GET",
"http.url": "https://www.wikipedia.org/wiki/Rabbit",
"http.status_code": 200,
},
kind=SpanKind.CLIENT,
links=links,
)
span._status = Status(status_code=StatusCode.OK)
span.start(start_time=start_time)
span.end(end_time=end_time)
envelope = exporter._span_to_envelope(span)
self.assertEqual(len(envelope.data.base_data.properties), 1)
json_dict = json.loads(
envelope.data.base_data.properties["_MS.links"]
)[0]
self.assertEqual(json_dict["id"], "a6f5d48acb4d31da")
def test_span_to_envelope_properties_std_metrics(self):
exporter = self._exporter
start_time = 1575494316027613500
end_time = start_time + 1001000000
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
attributes={
"http.method": "GET",
"http.url": "https://www.wikipedia.org/wiki/Rabbit",
"http.status_code": 200,
},
kind=SpanKind.CLIENT,
instrumentation_scope=InstrumentationScope("opentelemetry.instrumentation.requests")
)
span._status = Status(status_code=StatusCode.OK)
span.start(start_time=start_time)
span.end(end_time=end_time)
envelope = exporter._span_to_envelope(span)
self.assertEqual(len(envelope.data.base_data.properties), 1)
self.assertEqual(envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"], "True")
def test_span_events_to_envelopes_exception(self):
exporter = self._exporter
time = 1575494316027613500
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
parent=SpanContext(
trace_id=36873507687745823477771305566750195432,
span_id=12030755672171557338,
is_remote=False,
),
kind=SpanKind.CLIENT,
)
attributes = {
SpanAttributes.EXCEPTION_TYPE: "ZeroDivisionError",
SpanAttributes.EXCEPTION_MESSAGE: "zero division error",
SpanAttributes.EXCEPTION_STACKTRACE: "Traceback: ZeroDivisionError, division by zero",
SpanAttributes.EXCEPTION_ESCAPED: "True",
}
span.add_event("exception", attributes, time)
span.start()
span.end()
span._status = Status(status_code=StatusCode.OK)
envelopes = exporter._span_events_to_envelopes(span)
self.assertEqual(len(envelopes), 1)
envelope = envelopes[0]
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.Exception"
)
self.assertEqual(envelope.instrumentation_key,
"1234abcd-5678-4efa-8abc-1234567890ab")
self.assertIsNotNone(envelope.tags)
self.assertEqual(envelope.tags.get("ai.device.id"), azure_monitor_context["ai.device.id"])
self.assertEqual(envelope.tags.get("ai.device.locale"), azure_monitor_context["ai.device.locale"])
self.assertEqual(envelope.tags.get("ai.device.osVersion"), azure_monitor_context["ai.device.osVersion"])
self.assertEqual(envelope.tags.get("ai.device.type"), azure_monitor_context["ai.device.type"])
self.assertEqual(envelope.tags.get("ai.internal.sdkVersion"), azure_monitor_context["ai.internal.sdkVersion"])
self.assertEqual(envelope.tags.get("ai.operation.id"), "{:032x}".format(span.context.trace_id))
self.assertEqual(envelope.tags.get("ai.operation.parentId"), "{:016x}".format(span.context.span_id))
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(len(envelope.data.base_data.properties), 0)
self.assertEqual(len(envelope.data.base_data.exceptions), 1)
self.assertEqual(envelope.data.base_data.exceptions[0].type_name, "ZeroDivisionError")
self.assertEqual(envelope.data.base_data.exceptions[0].message, "zero division error")
self.assertEqual(envelope.data.base_data.exceptions[0].has_full_stack, True)
self.assertEqual(envelope.data.base_data.exceptions[0].stack, "Traceback: ZeroDivisionError, division by zero")
self.assertEqual(envelope.data.base_type, "ExceptionData")
def test_span_events_to_envelopes_message(self):
exporter = self._exporter
time = 1575494316027613500
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
parent=SpanContext(
trace_id=36873507687745823477771305566750195432,
span_id=12030755672171557338,
is_remote=False,
),
kind=SpanKind.CLIENT,
)
attributes = {
"test": "asd",
}
span.add_event("test event", attributes, time)
span.start()
span.end()
span._status = Status(status_code=StatusCode.OK)
envelopes = exporter._span_events_to_envelopes(span)
self.assertEqual(len(envelopes), 1)
envelope = envelopes[0]
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.Message"
)
self.assertEqual(envelope.instrumentation_key,
"1234abcd-5678-4efa-8abc-1234567890ab")
self.assertIsNotNone(envelope.tags)
self.assertEqual(envelope.tags.get("ai.device.id"), azure_monitor_context["ai.device.id"])
self.assertEqual(envelope.tags.get("ai.device.locale"), azure_monitor_context["ai.device.locale"])
self.assertEqual(envelope.tags.get("ai.device.osVersion"), azure_monitor_context["ai.device.osVersion"])
self.assertEqual(envelope.tags.get("ai.device.type"), azure_monitor_context["ai.device.type"])
self.assertEqual(envelope.tags.get("ai.internal.sdkVersion"), azure_monitor_context["ai.internal.sdkVersion"])
self.assertEqual(envelope.tags.get("ai.operation.id"), "{:032x}".format(span.context.trace_id))
self.assertEqual(envelope.tags.get("ai.operation.parentId"), "{:016x}".format(span.context.span_id))
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(len(envelope.data.base_data.properties), 1)
self.assertEqual(envelope.data.base_data.properties["test"], "asd")
self.assertEqual(envelope.data.base_data.message, "test event")
self.assertEqual(envelope.data.base_type, "MessageData")
def test_span_events_to_envelopes_sample_rate(self):
exporter = self._exporter
time = 1575494316027613500
span = trace._Span(
name="test",
context=SpanContext(
trace_id=36873507687745823477771305566750195431,
span_id=12030755672171557337,
is_remote=False,
),
parent=SpanContext(
trace_id=36873507687745823477771305566750195432,
span_id=12030755672171557338,
is_remote=False,
),
kind=SpanKind.CLIENT,
attributes={
"_MS.sampleRate": 50,
}
)
attributes = {
"test": "asd",
"_MS.sampleRate": 75,
}
span.add_event("test event", attributes, time)
span.start()
span.end()
span._status = Status(status_code=StatusCode.OK)
envelopes = exporter._span_events_to_envelopes(span)
self.assertEqual(len(envelopes), 1)
envelope = envelopes[0]
self.assertEqual(
envelope.name, "Microsoft.ApplicationInsights.Message"
)
self.assertEqual(envelope.sample_rate, 50)
self.assertEqual(envelope.instrumentation_key,
"1234abcd-5678-4efa-8abc-1234567890ab")
self.assertIsNotNone(envelope.tags)
self.assertEqual(envelope.tags.get("ai.device.id"), azure_monitor_context["ai.device.id"])
self.assertEqual(envelope.tags.get("ai.device.locale"), azure_monitor_context["ai.device.locale"])
self.assertEqual(envelope.tags.get("ai.device.osVersion"), azure_monitor_context["ai.device.osVersion"])
self.assertEqual(envelope.tags.get("ai.device.type"), azure_monitor_context["ai.device.type"])
self.assertEqual(envelope.tags.get("ai.internal.sdkVersion"), azure_monitor_context["ai.internal.sdkVersion"])
self.assertEqual(envelope.tags.get("ai.operation.id"), "{:032x}".format(span.context.trace_id))
self.assertEqual(envelope.tags.get("ai.operation.parentId"), "{:016x}".format(span.context.span_id))
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
self.assertEqual(len(envelope.data.base_data.properties), 1)
self.assertEqual(envelope.data.base_data.properties["test"], "asd")
self.assertEqual(envelope.data.base_data.message, "test event")
self.assertEqual(envelope.data.base_type, "MessageData")
class TestAzureTraceExporterUtils(unittest.TestCase):
def test_get_trace_export_result(self):
self.assertEqual(
_get_trace_export_result(ExportResult.SUCCESS),
SpanExportResult.SUCCESS,
)
self.assertEqual(
_get_trace_export_result(ExportResult.FAILED_NOT_RETRYABLE),
SpanExportResult.FAILURE,
)
self.assertEqual(
_get_trace_export_result(ExportResult.FAILED_RETRYABLE),
SpanExportResult.FAILURE,
)
self.assertEqual(_get_trace_export_result(None), None)
def test_check_instrumentation_span(self):
span = mock.Mock()
span.instrumentation_scope.name = "opentelemetry.instrumentation.test"
with mock.patch(
"azure.monitor.opentelemetry.exporter._utils.add_instrumentation"
) as add:
_check_instrumentation_span(span)
add.assert_called_once_with("test")
def test_check_instrumentation_span_not_instrumentation(self):
span = mock.Mock()
span.instrumentation_scope.name = "__main__"
with mock.patch(
"azure.monitor.opentelemetry.exporter._utils.add_instrumentation"
) as add:
_check_instrumentation_span(span)
add.assert_not_called()
| {
"content_hash": "a189bccbbb1ffdcfe86bac8411fa20ec",
"timestamp": "",
"source": "github",
"line_count": 1288,
"max_line_length": 119,
"avg_line_length": 40.21816770186335,
"alnum_prop": 0.5935213605914944,
"repo_name": "Azure/azure-sdk-for-python",
"id": "70c1e4d1fff14dc00a6dda4ee828079d985e0f6c",
"size": "51897",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from articulate import define
from vizone.client import init
from vizone.resource.asset import create_asset, asset_Item as Item
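# Example step library for the articulate DSL: each @define() phrase below describes
# the decorated class or method in natural language, with the <angle-bracket>
# placeholders presumably bound to the callable's parameters.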
@define('a Viz One client for <hostname>')
class VizOneSession:
def __init__(self, hostname):
self.hostname = hostname
def __expose__(self):
return {
'hostname': self.hostname,
}
@define('authenticate with <username> and <password>')
def authenticate(self, username, password):
self.client = init(self.hostname, username, password)
@define('create asset <title>')
def create_asset(self, title):
return create_asset(Item(title=title))
| {
"content_hash": "1269a7cc1fb8c323629b61f5c4f0a426",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.6651090342679128,
"repo_name": "eblade/articulate",
"id": "9e3ada544824c8418902fdfcf485d9d4cd14e057",
"size": "642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/vizone_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "31046"
},
{
"name": "Python",
"bytes": "34275"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('survey', '0010_remove_question_layers'),
]
operations = [
migrations.CreateModel(
name='BifSettings',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('notes', models.TextField(blank=True, default='')),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('bif', models.TextField(blank=True, default='')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| {
"content_hash": "0c20898eb1f65b8998282f1ff8eac143",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 114,
"avg_line_length": 33.93333333333333,
"alnum_prop": 0.5677799607072691,
"repo_name": "Ecotrust/floodplain-restoration",
"id": "f995befdfe9995f6b53eaff5c2ade12317bae3a6",
"size": "1042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dst/survey/migrations/0011_bifsettings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18811"
},
{
"name": "HTML",
"bytes": "91644"
},
{
"name": "JavaScript",
"bytes": "128271"
},
{
"name": "Python",
"bytes": "182327"
},
{
"name": "Shell",
"bytes": "4981"
}
],
"symlink_target": ""
} |
"""The standard runtime library."""
from runtime.env import (Datatype, Value, Function, Operator,
Signature, FunctionBinding, CastException, ANY, NULL,
RuntimeException)
NUMBER = Datatype("*number", None, ANY)
def cast_integer(value):
"""Casts a value to an INTEGER."""
if isinstance(value, Value):
if value.datatype in (FLOAT, INTEGER):
return Value(INTEGER, int(value.data))
if value.datatype is BOOLEAN:
return Value(INTEGER, 1 if value.data else 0)
if value.datatype is NULL:
return Value(INTEGER, 0)
raise CastException(value, INTEGER)
INTEGER = Datatype("int", cast_integer, NUMBER, lambda x: "%d" % x)
def cast_float(value):
"""Casts a value to a FLOAT."""
if isinstance(value, Value):
if value.datatype in (FLOAT, INTEGER):
return Value(FLOAT, float(value.data))
if value.datatype is NULL:
return Value(FLOAT, 0.0)
raise CastException(value, FLOAT)
FLOAT = Datatype("float", cast_float, NUMBER, lambda x: "%f" % x)
def cast_string(value):
"""Casts a value to a STRING."""
if isinstance(value, Value):
if value.datatype is INTEGER:
return Value(STRING, "%d" % value.data)
if value.datatype in (FLOAT, STRING):
return Value(STRING, str(value.data))
if value.datatype is BOOLEAN:
return Value(STRING, "true" if value.data else "false")
if value.datatype is NULL:
return Value(STRING, "")
raise CastException(value, STRING)
STRING = Datatype("string", cast_string, ANY, lambda x: "\"" + x + "\"")
def cast_boolean(value):
"""Casts a value to a BOOLEAN."""
if isinstance(value, Value):
if value.datatype is INTEGER:
return Value(BOOLEAN, True if value.data > 0 else False)
if value.datatype is BOOLEAN:
return Value(BOOLEAN, bool(value.data))
if value.datatype is NULL:
return Value(BOOLEAN, False)
raise CastException(value, BOOLEAN)
BOOLEAN = Datatype("bool", cast_boolean, ANY, lambda x: "true" if x else "false")
def cast_function(value):
"""Casts a value to a FUNCTION."""
if isinstance(value, Value):
if value.datatype is FUNCTION:
return Value(FUNCTION, value.data)
if value.datatype is NULL:
return Value(FUNCTION, None)
raise CastException(value, FUNCTION)
FUNCTION = Datatype("func", cast_function, ANY, lambda x: "function")
def cast_list(value):
"""Casts a value to a LIST."""
if isinstance(value, Value):
if value.datatype in (LIST, STRING):
return Value(LIST, list(value.data))
if value.datatype is NULL:
return Value(LIST, [])
raise CastException(value, LIST)
LIST = Datatype("LIST", cast_list, ANY, lambda x: "list")
def cast_map(value):
"""Casts a value to a MAP."""
if isinstance(value, Value):
if value.datatype is MAP:
return Value(MAP, dict(value.data))
if value.datatype is NULL:
return Value(MAP, dict())
raise CastException(value, MAP)
MAP = Datatype("map", cast_map, ANY, lambda x: "map")
def cast_set(value):
"""Casts a value to a SET."""
if isinstance(value, Value):
if value.datatype in (SET, LIST):
return Value(SET, set(value.data))
if value.datatype is NULL:
return Value(SET, set())
raise CastException(value, SET)
SET = Datatype("set", cast_set, ANY, lambda x: "set")
def cast_object(value):
"""Casts a value to an OBJECT."""
if isinstance(value, Value):
return Value(OBJECT, value.data)
raise CastException(value, OBJECT)
OBJECT = Datatype("object", cast_object, ANY, lambda x: "object")
def _add_operation():
"""The add operation."""
def add(context):
"""Add two number values."""
var_a = context.find("id", "a")
var_b = var_a.datatype.cast(context.find("id", "b"))
return Value(var_a.datatype, var_a.data + var_b.data)
add_node = FunctionBinding(add)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], add_node),
Signature([
Value(STRING, None, "a"),
Value(ANY, None, "b"),
], add_node),
]
return Function(signatures, "#add")
ADD_FUNCTION = _add_operation()
PLUS_OPERATOR = Operator(ADD_FUNCTION, "+")
def _sub_function():
"""The sub operation."""
def sub(context):
"""Subtract two number values."""
var_a = context.find("id", "a")
var_b = var_a.datatype.cast(context.find("id", "b"))
return Value(var_a.datatype, var_a.data - var_b.data)
sub_node = FunctionBinding(sub)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], sub_node),
]
return Function(signatures, "#sub")
SUB_FUNCTION = _sub_function()
MINUS_OPERATOR = Operator(SUB_FUNCTION, "-")
def _mul_operation():
def mul(context):
"""Multiply two numbers."""
var_a = context.find("id", "a")
var_b = var_a.datatype.cast(context.find("id", "b"))
return Value(var_a.datatype, var_a.data * var_b.data)
mul_node = FunctionBinding(mul)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], mul_node),
]
return Function(signatures, "#mul")
MUL_FUNCTION = _mul_operation()
MUL_OPERATOR = Operator(MUL_FUNCTION, "*")
def _pow_operation():
def pow(context):
"""Calculate a to the power of b."""
var_b = context.find("id", "b")
var_a = context.find("id", "a")
if var_b.datatype != var_a.datatype:
var_b = FLOAT.cast(var_b)
var_a = FLOAT.cast(var_a)
return Value(var_b.datatype, var_a.data**var_b.data)
pow_node = FunctionBinding(pow)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], pow_node)
]
return Function(signatures, "#pow")
POW_FUNCTION = _pow_operation()
POW_OPERATOR = Operator(POW_FUNCTION, "^")
def _div_operation():
def div(context):
"""Divide two numbers."""
var_a = context.find("id", "a")
var_b = var_a.datatype.cast(context.find("id", "b"))
if var_b.data == 0:
raise RuntimeException("Can not divide by 0")
result = var_a.data / var_b.data
if var_a.datatype is INTEGER:
result = int(result)
return Value(var_a.datatype, result)
div_node = FunctionBinding(div)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], div_node)
]
return Function(signatures, "#div")
DIV_FUNCTION = _div_operation()
DIV_OPERATOR = Operator(DIV_FUNCTION, "/")
def _mod_operation():
def mod(context):
"""Get the modulo of two numbers."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
if var_b.data == 0:
raise RuntimeException("Can not divide by 0")
return Value(INTEGER, var_a.data % var_b.data)
    mod_node = FunctionBinding(mod)
signatures = [
Signature([
Value(INTEGER, None, "a"),
Value(INTEGER, None, "b"),
], mod_node)
]
return Function(signatures, "#mod")
MOD_FUNCTION = _mod_operation()
MOD_OPERATOR = Operator(MOD_FUNCTION, "%")
def _equ_operation():
def equ(context):
"""Checks if two values are equal."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
if var_a.datatype is not var_b.datatype:
raise RuntimeException("Two values of different types may not be compared.")
return Value(BOOLEAN, var_a == var_b)
equ_node = FunctionBinding(equ)
signatures = [
Signature([
Value(ANY, None, "a"),
Value(ANY, None, "b"),
], equ_node)
]
return Function(signatures, "#equ")
EQU_FUNCTION = _equ_operation()
EQU_OPERATOR = Operator(EQU_FUNCTION, "==")
def _and_operation():
def and_o(context):
"""Returns true if both values are true."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
return Value(BOOLEAN, var_a.data and var_b.data)
and_node = FunctionBinding(and_o)
signatures = [
Signature([
Value(BOOLEAN, None, "a"),
Value(BOOLEAN, None, "b"),
], and_node),
]
return Function(signatures, "#and")
AND_FUNCTION = _and_operation()
AND_OPERATOR = Operator(AND_FUNCTION, "&&")
def _or_operation():
def or_o(context):
"""Returns true if one value is true."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
return Value(BOOLEAN, var_a.data or var_b.data)
or_node = FunctionBinding(or_o)
signatures = [
Signature([
Value(BOOLEAN, None, "a"),
Value(BOOLEAN, None, "b"),
], or_node),
]
return Function(signatures, "#or")
OR_FUNCTION = _or_operation()
OR_OPERATOR = Operator(OR_FUNCTION, "||")
def _xor_operation():
def xor(context):
"""Returns true if one of the two values is true."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
return Value(BOOLEAN, (var_a.data or var_b.data) and var_a.data != var_b.data)
xor_node = FunctionBinding(xor)
signatures = [
Signature([
Value(BOOLEAN, None, "a"),
Value(BOOLEAN, None, "b"),
], xor_node),
]
return Function(signatures, "#xor")
XOR_FUNCTION = _xor_operation()
XOR_OPERATOR = Operator(XOR_FUNCTION, "^|")
def _neq_operation():
def neq(context):
"""Returns true if both values are unequal."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
if var_a.datatype is not var_b.datatype:
raise RuntimeException("Two values of different types may not be compared.")
return Value(BOOLEAN, var_a != var_b)
neq_node = FunctionBinding(neq)
signatures = [
Signature([
Value(ANY, None, "a"),
Value(ANY, None, "b"),
], neq_node),
]
return Function(signatures, "#neq")
NEQ_FUNCTION = _neq_operation()
NEQ_OPERATOR = Operator(NEQ_FUNCTION, "!=")
def _sm_operation():
def smaller(context):
"""Returns true if one value is smaller than the other."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
return Value(BOOLEAN, var_a.data < var_b.data)
sm_node = FunctionBinding(smaller)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], sm_node),
Signature([
Value(STRING, None, "a"),
Value(STRING, None, "b"),
], sm_node),
]
return Function(signatures, "#sm")
SM_FUNCTION = _sm_operation()
SM_OPERATOR = Operator(SM_FUNCTION, "<")
def _lg_operation():
def larger(context):
"""Returns true if a is larger than b."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
return Value(BOOLEAN, var_a.data > var_b.data)
lg_node = FunctionBinding(larger)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], lg_node),
Signature([
Value(STRING, None, "a"),
Value(STRING, None, "b"),
], lg_node),
]
return Function(signatures, "#lg")
LG_FUNCTION = _lg_operation()
LG_OPERATOR = Operator(LG_FUNCTION, ">")
def _sme_operation():
def sme(context):
"""Returns true if a is smaller or equal to b."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
return Value(BOOLEAN, var_a.data <= var_b.data)
sme_node = FunctionBinding(sme)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], sme_node),
Signature([
Value(STRING, None, "a"),
Value(STRING, None, "b"),
], sme_node),
]
return Function(signatures, "#sme")
SME_FUNCTION = _sme_operation()
SME_OPERATOR = Operator(SME_FUNCTION, "<=")
def _lge_operation():
def lge(context):
"""Returns true if a is larger or equal to b."""
var_a = context.find("id", "a")
var_b = context.find("id", "b")
return Value(BOOLEAN, var_a.data >= var_b.data)
lge_node = FunctionBinding(lge)
signatures = [
Signature([
Value(NUMBER, None, "a"),
Value(NUMBER, None, "b"),
], lge_node),
Signature([
Value(STRING, None, "a"),
Value(STRING, None, "b"),
], lge_node),
]
return Function(signatures, "#lge")
LGE_FUNCTION = _lge_operation()
LGE_OPERATOR = Operator(LGE_FUNCTION, ">=")
def _unmi_operation():
def unmi(context):
"""Inverts the numeric value."""
var_a = context.find("id", "a")
return Value(var_a.datatype, -var_a.data)
unmi_node = FunctionBinding(unmi)
signatures = [
Signature([
Value(NUMBER, None, "a"),
], unmi_node)
]
return Function(signatures, "#unmi")
UNMI_FUNCTION = _unmi_operation()
MINUS_OPERATOR.add_function(UNMI_FUNCTION)
def _unpl_operation():
def unpl(context):
"""Does nothing special. Added for code consistency."""
var_a = context.find("id", "a")
return Value(var_a.datatype, var_a.data)
unpl_node = FunctionBinding(unpl)
signatures = [
Signature([
Value(NUMBER, None, "a"),
], unpl_node)
]
return Function(signatures, "#unpl")
UNPL_FUNCTION = _unpl_operation()
PLUS_OPERATOR.add_function(UNPL_FUNCTION)
def _uninv_operation():
def uninv(context):
"""Inverts a bool value."""
var_a = context.find("id", "a")
return Value(var_a.datatype, not var_a.data)
uninv_node = FunctionBinding(uninv)
signatures = [
Signature([
Value(BOOLEAN, None, "a"),
], uninv_node)
]
return Function(signatures, "#uninv")
UNINV_FUNCTION = _uninv_operation()
UNINV_OPERATOR = Operator(UNINV_FUNCTION, "!")
EXPORTS = [
# Datatypes
INTEGER, FLOAT, BOOLEAN, STRING, LIST, SET, MAP, OBJECT, FUNCTION, ANY, NULL,
# Operators
PLUS_OPERATOR, MINUS_OPERATOR, MUL_OPERATOR, DIV_OPERATOR, EQU_OPERATOR,
AND_OPERATOR, OR_OPERATOR, XOR_OPERATOR, NEQ_OPERATOR,
SM_OPERATOR, LG_OPERATOR, SME_OPERATOR, LGE_OPERATOR,
UNINV_OPERATOR, MOD_OPERATOR, POW_OPERATOR,
# Functions
ADD_FUNCTION, UNPL_FUNCTION, SUB_FUNCTION, UNMI_FUNCTION,
MUL_FUNCTION, DIV_FUNCTION, EQU_FUNCTION,
AND_FUNCTION, OR_FUNCTION, XOR_FUNCTION, NEQ_FUNCTION,
SM_FUNCTION, LG_FUNCTION, SME_FUNCTION, LGE_FUNCTION,
UNINV_FUNCTION, MOD_FUNCTION, POW_FUNCTION,
]
| {
"content_hash": "def5ecd311f372ee20bc2a64e6fd8572",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 88,
"avg_line_length": 29.99009900990099,
"alnum_prop": 0.573786728293166,
"repo_name": "lnsp/tea",
"id": "c94a721370b8cc167ac7ca1e2d13f12e926de7cc",
"size": "15145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtime/lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "25"
},
{
"name": "Python",
"bytes": "88546"
},
{
"name": "Shell",
"bytes": "28"
},
{
"name": "Tea",
"bytes": "117"
}
],
"symlink_target": ""
} |
"""
All prices going up 5p
Second class letters:
1 sheet (double-sided) increases from 30p to 35p (plus VAT)
2 sheets (double-sided) increases from 35p to 40p (plus VAT)
3 sheets (double-sided) increases from 40p to 45p (plus VAT)
4 sheets (double-sided) increases from 45p to 50p (plus VAT)
5 sheets (double-sided) increases from 50p to 55p (plus VAT)
First class letters:
1 sheet (double-sided) increases from 56p to 61p (plus VAT)
2 sheets (double-sided) increases from 61p to 66p (plus VAT)
3 sheets (double-sided) increases from 66p to 71p (plus VAT)
4 sheets (double-sided) increases from 71p to 76p (plus VAT)
5 sheets (double-sided) increases from 76p to 81p (plus VAT)
Revision ID: 0306_letter_rates_price_rise
Revises: 0305_add_gp_org_type
Create Date: 2019-09-25 15:43:09.388251
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = "0306_letter_rates_price_rise"
down_revision = "0305_add_gp_org_type"
CHANGEOVER_DATE = datetime(2019, 9, 30, 23, 0)
def upgrade():
# all old rates are going in the bin
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
base_prices = {
"second": 30,
"first": 56,
}
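    # New rate per letter: base price plus 5p per sheet, stored in pounds.
    # For example, a 3-sheet second class letter is (30 + 5 * 3) / 100.0 == 0.45,
    # i.e. the 45p quoted in the module docstring.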
op.bulk_insert(
LetterRate.__table__,
[
{
"id": uuid.uuid4(),
"start_date": CHANGEOVER_DATE,
"end_date": None,
"sheet_count": sheet_count,
"rate": (base_prices[post_class] + (5 * sheet_count)) / 100.0,
"crown": crown,
"post_class": post_class,
}
for sheet_count, crown, post_class in itertools.product(range(1, 6), [True, False], ["first", "second"])
],
)
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
| {
"content_hash": "5e771d0da5c4e2b167868383149c9964",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 116,
"avg_line_length": 32.338235294117645,
"alnum_prop": 0.6512050932241928,
"repo_name": "alphagov/notifications-api",
"id": "6d8752043f15bd9acf4f54e010bf13b0c6953229",
"size": "2199",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "migrations/versions/0306_letter_rates_price_rise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.utils import timezone
# Create your models here.
| {
"content_hash": "3840281cefc8b90a426138e19c197840",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 39,
"avg_line_length": 25.5,
"alnum_prop": 0.7843137254901961,
"repo_name": "Saket-Komawar/Forex",
"id": "7245e4ee3c439ac1f2f7a0e94bf04d9b5353f1a1",
"size": "102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solo/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "80026"
},
{
"name": "HTML",
"bytes": "27162"
},
{
"name": "JavaScript",
"bytes": "177035"
},
{
"name": "Python",
"bytes": "39153"
},
{
"name": "R",
"bytes": "323"
}
],
"symlink_target": ""
} |
""" QiMessaging Python bindings
Provided features are very close to C++, Python style.
"""
def load_lib_qipyessaging():
""" Load _qipyessaging.so and its dependencies.
This makes _qipyessaging usable from a relocatable
SDK without having to set LD_LIBRARY_PATH
"""
import ctypes
import os
import sys
deps = [
"libboost_python.so",
"libboost_system.so",
"libboost_chrono.so",
"libboost_program_options.so",
"libboost_thread.so",
"libboost_filesystem.so",
"libboost_regex.so",
"libboost_locale.so",
"libboost_signals.so",
"libqi.so",
"libqitype.so",
"libqimessaging.so",
"libqipython.so",
]
if sys.version_info[0] == 2:
deps.append("libqipython.so")
else:
deps.append("libqipython3.so")
this_dir = os.path.abspath(os.path.dirname(__file__))
for dep in deps:
full_path = os.path.join(this_dir, "..", dep)
try:
ctypes.cdll.LoadLibrary(full_path)
except Exception:
pass
def set_dll_directory():
import ctypes
import os
    this_dir = os.path.dirname(__file__)
sdk_dir = os.path.join(this_dir, "..", "..")
sdk_dir = os.path.abspath(sdk_dir)
bin_dir = os.path.join(sdk_dir, "bin")
if os.path.exists(bin_dir):
ctypes.windll.kernel32.SetDllDirectoryA(bin_dir)
def _on_import_module():
import sys
if sys.platform.startswith("linux"):
load_lib_qipyessaging()
if sys.platform.startswith("win"):
set_dll_directory()
#######
_on_import_module()
from _qi import Application as _Application
from _qi import ApplicationSession as _ApplicationSession
from _qi import ( FutureState, FutureTimeout, Future, Promise,
Property, Session, Signal,
createObject, registerObjectFactory,
async, PeriodicTask)
from . import path
from ._type import ( Void, Bool,
Int8, UInt8,
Int16, UInt16,
Int32, UInt32,
Int64, UInt64,
Float, Double,
String, List,
Map, Struct,
Object, Dynamic,
Buffer, AnyArguments,
typeof, _isinstance)
from ._binder import bind, nobind, singleThreaded, multiThreaded
from .logging import fatal, error, warning, info, verbose, Logger
from .logging import getLogger, logFatal, logError, logWarning, logInfo, logVerbose, logDebug #deprecated
from .translator import defaultTranslator, tr, Translator
#rename isinstance here. (isinstance should not be used in this file)
isinstance = _isinstance
_app = None
# We want to stop all threads before Python starts destroying modules
# and the like. (This avoids callbacks calling into Python while it is
# being destroyed.)
def _stopApplication():
global _app
if _app is not None:
_app.stop()
del _app
_app = None
# Application is a singleton; it should live until the end of the program
# because it owns the event loops.
def Application(args=None, raw=False, autoExit=True, url=None):
global _app
if args is None:
args = [ "" ]
if url is None:
url = "tcp://127.0.0.1:9559"
if _app is None:
if raw:
_app = _Application(args)
else:
            _app = _ApplicationSession(args, autoExit, url)
else:
raise Exception("Application was already initialized")
return _app
ApplicationSession = Application
__all__ = ["FutureState",
"FutureTimeout",
"Future",
"Promise",
"Property",
"Session",
"Signal",
"createObject",
"registerObjectFactory",
"async",
"Void", "Bool", "Int8", "UInt8", "Int16", "UInt16", "Int32", "UInt32", "Int64", "UInt64",
"Float", "Double", "String", "List", "Map", "Struct", "Object", "Dynamic", "Buffer", "AnyArguments",
"typeof", "isinstance",
"bind", "nobind", "singleThreaded", "multiThreaded",
"fatal", "error", "warning", "info", "verbose",
"getLogger", "logFatal", "logError", "logWarning", "logInfo", "logVerbose", "logDebug", #deprecated
"Logger", "defaultTranslator", "tr", "Translator"
]
import atexit
atexit.register(_stopApplication)
# Do not pollute namespace
del atexit
__version__ = "2.1.2.17"
| {
"content_hash": "2dd25e618078a99241507d5f6290332a",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 111,
"avg_line_length": 30.79591836734694,
"alnum_prop": 0.5825049701789264,
"repo_name": "plus44/hcr-2016",
"id": "d5c331c12a05b1435f84a61de2a753593c3c48f6",
"size": "4706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/pynaoqi-python2.7-2.1.2.17-linux64/qi/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "8500"
},
{
"name": "CMake",
"bytes": "2496"
},
{
"name": "GLSL",
"bytes": "721"
},
{
"name": "HTML",
"bytes": "143"
},
{
"name": "Matlab",
"bytes": "4873"
},
{
"name": "Objective-C",
"bytes": "2721"
},
{
"name": "Python",
"bytes": "548575"
},
{
"name": "Ruby",
"bytes": "1314"
},
{
"name": "Shell",
"bytes": "1436"
},
{
"name": "Swift",
"bytes": "882369"
}
],
"symlink_target": ""
} |
from numpy.testing import assert_allclose, assert_equal
from chaco.image_utils import trim_screen_rect, X_PARAMS, Y_PARAMS
SINGLE_PIXEL = (1, 1)
FOUR_PIXELS = (2, 2)
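# X_PARAMS and Y_PARAMS give the (position, length) index pairs into an
# (x, y, width, height) screen rectangle, so the helpers below treat both axes alike.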
def midpoint(x, length):
return x + length / 2.0
def assert_midpoints_equal(bounds_list):
for i_pos, i_length in (X_PARAMS, Y_PARAMS):
x_mid = [midpoint(bnd[i_pos], bnd[i_length]) for bnd in bounds_list]
assert_equal(x_mid[0], x_mid[1])
def test_viewer_zoomed_into_single_pixel():
screen_rect = [0, 0, 100, 100]
view_rect = [10, 11, 1, 2]
new_rect = trim_screen_rect(screen_rect, view_rect, SINGLE_PIXEL)
assert_allclose(new_rect, view_rect)
def test_viewer_at_corner_of_single_image():
offset = 0.2
screen_rect = [1, 1, 1, 1]
new_size = [1-offset, 1-offset]
down_right = [1+offset, 1+offset, 1, 1]
new_rect = trim_screen_rect(screen_rect, down_right, SINGLE_PIXEL)
expected_rect = down_right[:2] + new_size
assert_midpoints_equal((new_rect, expected_rect))
up_left = [1-offset, 1-offset, 1, 1]
new_rect = trim_screen_rect(screen_rect, up_left, SINGLE_PIXEL)
expected_rect = [1, 1] + new_size
assert_midpoints_equal((new_rect, expected_rect))
def test_viewer_zoomed_into_four_pixel_intersection():
screen_rect = [0, 0, 100, 100] # 4-pixel intersection at (50, 50)
view_rectangles = ([49, 49, 2, 2], # Centered pixel intersection
[49, 49, 3, 3], # Intersection at 1/3 of view
[49, 49, 2, 3]) # Intersection at 1/2, 1/3 of view
for view_rect in view_rectangles:
new_rect = trim_screen_rect(screen_rect, view_rect, FOUR_PIXELS)
yield assert_midpoints_equal, (new_rect, screen_rect)
def test_viewer_at_corner_of_four_pixel_image():
offset = 0.2
screen_rect = [1, 1, 1, 1]
view_rectangles = ([1+offset, 1+offset, 1, 1], # Shifted down and right
[1-offset, 1-offset, 1, 1]) # Shifted up and left
for view_rect in view_rectangles:
new_rect = trim_screen_rect(screen_rect, view_rect, FOUR_PIXELS)
yield assert_equal, new_rect, screen_rect
| {
"content_hash": "77bfa16866024ca4770c669f7348548c",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 76,
"avg_line_length": 34.98360655737705,
"alnum_prop": 0.6283973758200563,
"repo_name": "tommy-u/chaco",
"id": "62ae83418c06b63d90eedf8be6a2431c9abd8869",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chaco/tests/test_image_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57089"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "Gnuplot",
"bytes": "611"
},
{
"name": "Python",
"bytes": "2475987"
}
],
"symlink_target": ""
} |
from securitycenter import SecurityCenter4
import time
# NOTE: all time is based on Unix time. 86400 is 1 day in seconds.
update_list = [{
'asset_id': 28,
'filters': {
'sensor': 'HomeNet_Snort',
'endtime': int(time.time()),
'starttime': (int(time.time()) - 86400),
},
},{
'asset_id': 29,
'filters': {
'type': 'nbs',
'endtime': int(time.time()),
'starttime': (int(time.time()) - 86400),
},
},
]
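# Each entry pairs a dynamic asset ID with the LCE 'sumip' filters used to refresh it:
# the loop below collects the summarized IP addresses and pushes them to that asset.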
host = 'HOST'
username = 'api_user'
password = 's3cr3tp@ssw0rd'
sc = SecurityCenter4(host)
sc.login(username, password)
for update in update_list:
events = sc.query('sumip', source='lce', **update['filters'])
ips = []
for event in events:
ips.append(event['address'])
sc.asset_update(update['asset_id'], ips=ips)
| {
"content_hash": "5ccbdd99fa53f2d96956be2394eeb7bc",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 66,
"avg_line_length": 23.542857142857144,
"alnum_prop": 0.5716019417475728,
"repo_name": "SteveMcGrath/pySecurityCenter",
"id": "574a7534af2e27917bf2300f815ea1ad2e6369d7",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sc4/lce_assets/lce_assets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53488"
}
],
"symlink_target": ""
} |
from django.utils.module_loading import import_string
from payments.exceptions import InvalidSettingsEntryForPaymentMethodException, InvalidPaymentFormParentClass, \
PaymentViewDoesNotExistInTheViewsMapException
class AbstractPaymentInterface:
PAYMENT_STATUS_MAPPING = dict()
REQUIRED_CONFIG_VALUES = list()
DEFAULT_VIEW_OPTION = 'pay'
VIEWS_MAP = dict()
VIEWS_MODULE = ''
__payment_form_class = None
__api = None
def __init__(self, payment, config, *args, **kwargs):
self.__payment = payment
self.__config = config
self.clean_config()
self.setup_api()
@property
def api(self):
return self.__api
@api.setter
def api(self, value):
self.__api = value
@property
def payment(self):
"""Get Payment object.
Property has been used to disable changing it from outside."""
return self.__payment
@property
def config(self):
"""Property for config so it cannot be changed from the outside."""
return self.__config
def get_payment_form_class(self):
# Let's assume it is possible for a payment adapter to have no forms
if not self.__payment_form_class:
return None
# Raise an exception if form does not inherit from AbstractPaymentForm
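        # NOTE: AbstractPaymentForm is not imported in this module; this check assumes it is available in scope.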
if not isinstance(self.__payment_form_class, AbstractPaymentForm):
raise InvalidPaymentFormParentClass
return self.__payment_form_class
def create_payment(self, *args, **kwargs):
"""Create a payment instance with a payment service.
        It's good to call self.save_payment() afterwards to update the data in the database."""
raise NotImplementedError
def execute_payment(self, *args, **kwargs):
"""Execute the payment with a payment service.
        It's good to call self.save_payment() afterwards to update the data in the database."""
raise NotImplementedError
def retrieve_payment(self, *args, **kwargs):
"""Retrieve payment details from a payment servicem.
It's good to call self.save_payment() after to update the data in database."""
raise NotImplementedError
def setup_api(self):
"""Set up API instance for the payment system."""
# self.api is a proposed attribute to store it.
raise NotImplementedError
def save_payment(self, *args, commit=True, **kwargs):
"""Save payment information to the database.
This method is also a good place to process data before saving."""
raise NotImplementedError
def clean_config(self):
"""Validate config, i.e. the config stored in django.conf.settings.
When subclassing remember to call parent's method as well."""
for value in self.REQUIRED_CONFIG_VALUES:
            if value not in self.__config:
raise InvalidSettingsEntryForPaymentMethodException('Missing key in the settings entry: %s' % (value,))
def map_payment_status(self, status):
"""Map payment status from a payment service to that in the Payment model."""
self.payment.status = self.PAYMENT_STATUS_MAPPING[status]
    def is_setup(self):
"""Check if interface is set up. Simply return True if you don't want to do anything with it."""
raise NotImplementedError
def get_view_class(self, option):
"""Get a view class for a specified operation."""
if not isinstance(option, str):
raise TypeError("Option has to be a string.")
try:
view_class_name = self.VIEWS_MAP[option.lower()]
except KeyError:
raise PaymentViewDoesNotExistInTheViewsMapException('option: %s' % (option,))
else:
return self.__import_view_class(view_class_name)
def __import_view_class(self, view_class_name):
"""Import a view class specifiec in the attribute.
If you want to pass a custom module with views specify a class attribute on a payment interface
called 'VIEWS_MODULE'.
"""
if not self.VIEWS_MODULE:
# Work out module of interfaces and cut ".interfaces" bit from it.
payment_adapter_module_path = '.'.join(self.__module__.split('.')[:-1])
views_module_path = '%s.%s' % (payment_adapter_module_path, 'views')
else:
views_module_path = self.VIEWS_MODULE
return import_string('%s.%s' % (views_module_path, view_class_name))
| {
"content_hash": "b249892ecf36fb1adeb93af1c91a40da",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 119,
"avg_line_length": 36.144,
"alnum_prop": 0.6405489154493138,
"repo_name": "tm-kn/django-universal-payments",
"id": "666a2957a8a728e1f598e8eb87ecd270c0ba8dfe",
"size": "4518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payments/interfaces.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "49992"
}
],
"symlink_target": ""
} |
import unittest
import mock
from ...management.jobs import Jobs
class TestJobs(unittest.TestCase):
def test_init_with_optionals(self):
t = Jobs(domain="domain", token="jwttoken", telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get("Auth0-Client", None)
self.assertEqual(telemetry_header, None)
@mock.patch("auth0.v3.management.jobs.RestClient")
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain="domain", token="jwttoken")
j.get("an-id")
mock_instance.get.assert_called_with(
"https://domain/api/v2/jobs/an-id",
)
@mock.patch("auth0.v3.management.jobs.RestClient")
def test_get_failed_job(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain="domain", token="jwttoken")
j.get_failed_job("an-id")
mock_instance.get.assert_called_with(
"https://domain/api/v2/jobs/an-id/errors",
)
@mock.patch("auth0.v3.management.jobs.RestClient")
def test_get_job_results(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain="domain", token="jwttoken")
j.get_results("an-id")
# Should use the 'get by id' URL
mock_instance.get.assert_called_with(
"https://domain/api/v2/jobs/an-id",
)
@mock.patch("auth0.v3.management.jobs.RestClient")
def test_export_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain="domain", token="jwttoken")
j.export_users({"connection_id": "cxn_id", "format": "json"})
mock_instance.post.assert_called_with(
"https://domain/api/v2/jobs/users-exports",
data={"connection_id": "cxn_id", "format": "json"},
)
@mock.patch("auth0.v3.management.jobs.RestClient")
def test_import_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain="domain", token="jwttoken")
j.import_users(connection_id="1234", file_obj={})
mock_instance.file_post.assert_called_with(
"https://domain/api/v2/jobs/users-imports",
data={
"connection_id": "1234",
"upsert": "false",
"send_completion_email": "true",
"external_id": None,
},
files={"users": {}},
)
j.import_users(
connection_id="1234",
file_obj={},
upsert=True,
send_completion_email=False,
external_id="ext-id-123",
)
mock_instance.file_post.assert_called_with(
"https://domain/api/v2/jobs/users-imports",
data={
"connection_id": "1234",
"upsert": "true",
"send_completion_email": "false",
"external_id": "ext-id-123",
},
files={"users": {}},
)
j.import_users(
connection_id="1234", file_obj={}, upsert=False, send_completion_email=True
)
mock_instance.file_post.assert_called_with(
"https://domain/api/v2/jobs/users-imports",
data={
"connection_id": "1234",
"upsert": "false",
"send_completion_email": "true",
"external_id": None,
},
files={"users": {}},
)
@mock.patch("auth0.v3.management.jobs.RestClient")
def test_verification_email(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain="domain", token="jwttoken")
j.send_verification_email({"a": "b", "c": "d"})
mock_instance.post.assert_called_with(
"https://domain/api/v2/jobs/verification-email", data={"a": "b", "c": "d"}
)
| {
"content_hash": "3bedefa3c2936394959796971fae89d7",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 87,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.549234693877551,
"repo_name": "auth0/auth0-python",
"id": "5d7d6700338b5b1cb037a9ac99f1ab22587c4093",
"size": "3920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auth0/v3/test/management/test_jobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "437970"
},
{
"name": "Shell",
"bytes": "232"
}
],
"symlink_target": ""
} |
import inspect
import webob
from nova.api.openstack import wsgi
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
class RequestTest(test.TestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertEqual(None, request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept(self):
for content_type in ('application/xml',
'application/vnd.openstack.compute+xml',
'application/json',
'application/vnd.openstack.compute+json'):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = content_type
result = request.best_match_content_type()
self.assertEqual(result, content_type)
def test_content_type_from_accept_best(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_cache_and_retrieve_instances(self):
request = wsgi.Request.blank('/foo')
instances = []
for x in xrange(3):
instances.append({'uuid': 'uuid%s' % x})
# Store 2
request.cache_db_instances(instances[:2])
# Store 1
request.cache_db_instance(instances[2])
self.assertEqual(request.get_db_instance('uuid0'),
instances[0])
self.assertEqual(request.get_db_instance('uuid1'),
instances[1])
self.assertEqual(request.get_db_instance('uuid2'),
instances[2])
self.assertEqual(request.get_db_instance('uuid3'), None)
self.assertEqual(request.get_db_instances(),
{'uuid0': instances[0],
'uuid1': instances[1],
'uuid2': instances[2]})
class ActionDispatcherTest(test.TestCase):
def test_dispatch(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
def test_dispatch_action_None(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
class DictSerializerTest(test.TestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual(serializer.serialize({}, 'update'), '')
class XMLDictSerializerTest(test.TestCase):
def test_xml(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
serializer = wsgi.XMLDictSerializer(xmlns="asdf")
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_xml)
class JSONDictSerializerTest(test.TestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
class TextDeserializerTest(test.TestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual(deserializer.deserialize({}, 'update'), {})
class JSONDeserializerTest(test.TestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(deserializer.deserialize(data), as_dict)
class XMLDeserializerTest(test.TestCase):
def test_xml(self):
xml = """
<a a1="1" a2="2">
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
<d><e>1</e></d>
<f>1</f>
</a>
""".strip()
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
deserializer = wsgi.XMLDeserializer(metadata=metadata)
self.assertEqual(deserializer.deserialize(xml), as_dict)
def test_xml_empty(self):
xml = """<a></a>"""
as_dict = {"body": {"a": {}}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(deserializer.deserialize(xml), as_dict)
class ResourceTest(test.TestCase):
def test_resource_call(self):
class Controller(object):
def index(self, req):
return 'off'
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(response.body, 'off')
self.assertEqual(response.status_int, 200)
def test_resource_not_authorized(self):
class Controller(object):
def index(self, req):
raise exception.NotAuthorized()
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(response.status_int, 403)
def test_dispatch(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'index', None, '')
actual = resource.dispatch(method, None, {'pants': 'off'})
expected = 'off'
self.assertEqual(actual, expected)
def test_get_method_unknown_controller_method(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(AttributeError, resource.get_method,
None, 'create', None, '')
def test_get_method_action_json(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(controller._action_foo, method)
def test_get_method_action_xml(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'action',
'application/xml',
'<fooAction>true</fooAction>')
self.assertEqual(controller._action_foo, method)
def test_get_method_action_bad_body(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.MalformedRequestBody, resource.get_method,
None, 'action', 'application/json', '{}')
def test_get_method_unknown_controller_action(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(KeyError, resource.get_method,
None, 'action', 'application/json',
'{"barAction": true}')
def test_get_method_action_method(self):
class Controller():
def action(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'action',
'application/xml',
                                                 '<fooAction>true</fooAction>')
self.assertEqual(controller.action, method)
def test_get_action_args(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12,
}],
}
expected = {'action': 'update', 'id': 12}
self.assertEqual(resource.get_action_args(env), expected)
def test_get_body_bad_content(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/none'
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertEqual(content_type, None)
self.assertEqual(body, '')
def test_get_body_no_content_type(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertEqual(content_type, None)
self.assertEqual(body, '')
def test_get_body_no_content_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = ''
content_type, body = resource.get_body(request)
self.assertEqual(content_type, None)
self.assertEqual(body, '')
def test_get_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertEqual(content_type, 'application/json')
self.assertEqual(body, 'foo')
def test_deserialize_badtype(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.InvalidContentType,
resource.deserialize,
controller.index, 'application/none', 'foo')
def test_deserialize_default(self):
class JSONDeserializer(object):
def deserialize(self, body):
return 'json'
class XMLDeserializer(object):
def deserialize(self, body):
return 'xml'
class Controller(object):
@wsgi.deserializers(xml=XMLDeserializer)
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller, json=JSONDeserializer)
obj = resource.deserialize(controller.index, 'application/json', 'foo')
self.assertEqual(obj, 'json')
def test_deserialize_decorator(self):
class JSONDeserializer(object):
def deserialize(self, body):
return 'json'
class XMLDeserializer(object):
def deserialize(self, body):
return 'xml'
class Controller(object):
@wsgi.deserializers(xml=XMLDeserializer)
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller, json=JSONDeserializer)
obj = resource.deserialize(controller.index, 'application/xml', 'foo')
self.assertEqual(obj, 'xml')
def test_register_actions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
@wsgi.action('barAction')
def _action_bar(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_actions)
extended = ControllerExtended()
resource.register_actions(extended)
self.assertEqual({
'fooAction': extended._action_foo,
'barAction': extended._action_bar,
}, resource.wsgi_actions)
def test_register_extensions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj, pants=None):
return None
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp, id, body):
return None
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_extensions)
self.assertEqual({}, resource.wsgi_action_extensions)
extended = ControllerExtended()
resource.register_extensions(extended)
self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
self.assertEqual({'fooAction': [extended._action_foo]},
resource.wsgi_action_extensions)
def test_get_method_extensions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj, pants=None):
return None
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_extensions(extended)
method, extensions = resource.get_method(None, 'index', None, '')
self.assertEqual(method, controller.index)
self.assertEqual(extensions, [extended.index])
def test_get_method_action_extensions(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
class ControllerExtended(wsgi.Controller):
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp_obj, id, body):
return None
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_extensions(extended)
method, extensions = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(method, controller._action_foo)
self.assertEqual(extensions, [extended._action_foo])
def test_get_method_action_whitelist_extensions(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('create')
def _create(self, req, body):
pass
@wsgi.action('delete')
def _delete(self, req, id):
pass
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_actions(extended)
method, extensions = resource.get_method(None, 'create',
'application/json',
'{"create": true}')
self.assertEqual(method, extended._create)
self.assertEqual(extensions, [])
method, extensions = resource.get_method(None, 'delete', None, None)
self.assertEqual(method, extended._delete)
self.assertEqual(extensions, [])
def test_pre_process_extensions_regular(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return None
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
self.assertEqual(called, [])
self.assertEqual(response, None)
self.assertEqual(list(post), [extension2, extension1])
def test_pre_process_extensions_generator(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
called.append('pre1')
resp_obj = yield
called.append('post1')
def extension2(req):
called.append('pre2')
resp_obj = yield
called.append('post2')
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
post = list(post)
self.assertEqual(called, ['pre1', 'pre2'])
self.assertEqual(response, None)
self.assertEqual(len(post), 2)
self.assertTrue(inspect.isgenerator(post[0]))
self.assertTrue(inspect.isgenerator(post[1]))
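        # Resume each generator so its post-yield (post-processing) half runs.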
for gen in post:
try:
gen.send(None)
except StopIteration:
continue
self.assertEqual(called, ['pre1', 'pre2', 'post2', 'post1'])
def test_pre_process_extensions_generator_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
called.append('pre1')
yield 'foo'
def extension2(req):
called.append('pre2')
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
self.assertEqual(called, ['pre1'])
self.assertEqual(response, 'foo')
self.assertEqual(post, [])
def test_post_process_extensions_regular(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return None
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual(called, [2, 1])
self.assertEqual(response, None)
def test_post_process_extensions_regular_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return 'foo'
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual(called, [2])
self.assertEqual(response, 'foo')
def test_post_process_extensions_generator(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
resp_obj = yield
called.append(1)
def extension2(req):
resp_obj = yield
called.append(2)
ext1 = extension1(None)
ext1.next()
ext2 = extension2(None)
ext2.next()
response = resource.post_process_extensions([ext2, ext1],
None, None, {})
self.assertEqual(called, [2, 1])
self.assertEqual(response, None)
def test_post_process_extensions_generator_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
resp_obj = yield
called.append(1)
def extension2(req):
resp_obj = yield
called.append(2)
yield 'foo'
ext1 = extension1(None)
ext1.next()
ext2 = extension2(None)
ext2.next()
response = resource.post_process_extensions([ext2, ext1],
None, None, {})
self.assertEqual(called, [2])
self.assertEqual(response, 'foo')
def test_resource_exception_handler_type_error(self):
"""A TypeError should be translated to a Fault/HTTP 400"""
def foo(a,):
return a
try:
with wsgi.ResourceExceptionHandler():
foo() # generate a TypeError
self.fail("Should have raised a Fault (HTTP 400)")
except wsgi.Fault as fault:
self.assertEqual(400, fault.status_int)
class ResponseObjectTest(test.TestCase):
def test_default_code(self):
robj = wsgi.ResponseObject({})
self.assertEqual(robj.code, 200)
def test_modified_code(self):
robj = wsgi.ResponseObject({})
robj._default_code = 202
self.assertEqual(robj.code, 202)
def test_override_default_code(self):
robj = wsgi.ResponseObject({}, code=404)
self.assertEqual(robj.code, 404)
def test_override_modified_code(self):
robj = wsgi.ResponseObject({}, code=404)
robj._default_code = 202
self.assertEqual(robj.code, 404)
def test_set_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual(robj.headers, {'header': 'foo'})
def test_get_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual(robj['hEADER'], 'foo')
def test_del_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
del robj['hEADER']
self.assertFalse('header' in robj.headers)
def test_header_isolation(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
hdrs = robj.headers
hdrs['hEADER'] = 'bar'
self.assertEqual(robj['hEADER'], 'foo')
def test_default_serializers(self):
robj = wsgi.ResponseObject({})
self.assertEqual(robj.serializers, {})
def test_bind_serializers(self):
robj = wsgi.ResponseObject({}, json='foo')
robj._bind_method_serializers(dict(xml='bar', json='baz'))
self.assertEqual(robj.serializers, dict(xml='bar', json='foo'))
def test_get_serializer(self):
robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
_mtype, serializer = robj.get_serializer(content_type)
self.assertEqual(serializer, mtype)
def test_get_serializer_defaults(self):
robj = wsgi.ResponseObject({})
default_serializers = dict(json='json', xml='xml', atom='atom')
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
self.assertRaises(exception.InvalidContentType,
robj.get_serializer, content_type)
_mtype, serializer = robj.get_serializer(content_type,
default_serializers)
self.assertEqual(serializer, mtype)
def test_serialize(self):
class JSONSerializer(object):
def serialize(self, obj):
return 'json'
class XMLSerializer(object):
def serialize(self, obj):
return 'xml'
class AtomSerializer(object):
def serialize(self, obj):
return 'atom'
robj = wsgi.ResponseObject({}, code=202,
json=JSONSerializer,
xml=XMLSerializer,
atom=AtomSerializer)
robj['X-header1'] = 'header1'
robj['X-header2'] = 'header2'
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
request = wsgi.Request.blank('/tests/123')
response = robj.serialize(request, content_type)
self.assertEqual(response.headers['Content-Type'], content_type)
self.assertEqual(response.headers['X-header1'], 'header1')
self.assertEqual(response.headers['X-header2'], 'header2')
self.assertEqual(response.status_int, 202)
self.assertEqual(response.body, mtype)
| {
"content_hash": "52d66eaea7699c800adbd2e8d432c019",
"timestamp": "",
"source": "github",
"line_count": 861,
"max_line_length": 79,
"avg_line_length": 34.558652729384434,
"alnum_prop": 0.561351033439758,
"repo_name": "NoBodyCam/TftpPxeBootBareMetal",
"id": "7427eb2afb31d0ebfcf1a158f585029b6fe2b9c6",
"size": "29800",
"binary": false,
"copies": "1",
"ref": "refs/heads/tftp_pxe_boot",
"path": "nova/tests/api/openstack/test_wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6568288"
},
{
"name": "Shell",
"bytes": "17010"
}
],
"symlink_target": ""
} |
"""
flask.logging
~~~~~~~~~~~~~
Implements the logging support for Flask.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import web
from logging import getLogger
from logging import getLoggerClass
from logging import DEBUG
from logging import Formatter
from logging import INFO
from logging import StreamHandler
__all__ = ['create_logger']
def create_logger():
"""Creates a logger for the given application.
    This logger works similarly to a regular Python logger but changes the
    effective logging level based on the application's debug flag. Furthermore,
    this function also removes all attached handlers in case there was a logger
    with the same name before.
"""
Logger = getLoggerClass()
class DebugLogger(Logger):
def getEffectiveLevel(self):
if self.level == 0:
return DEBUG if web.config.DEBUG else INFO
return super(DebugLogger, self).getEffectiveLevel()
class DebugHandler(StreamHandler):
        def emit(self, record):
            StreamHandler.emit(self, record)
handler = DebugHandler()
handler.setLevel(DEBUG)
handler.setFormatter(Formatter(web.config.LOG_FORMAT))
logger = getLogger(web.config.LOGGER_NAME)
# just in case that was not a new logger, get rid of all the handlers
# already attached to it.
del logger.handlers[:]
logger.__class__ = DebugLogger
if web.config.LOG_ENABLE:
logger.addHandler(handler)
return logger
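# Example (hypothetical config values): with web.config.LOGGER_NAME = "app" and
# web.config.LOG_ENABLE = True, create_logger() returns a logger whose effective
# level follows web.config.DEBUG (DEBUG when debugging, INFO otherwise).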
| {
"content_hash": "361312c72bfcda46bb2c4a9a07d0302c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 27.785714285714285,
"alnum_prop": 0.6953727506426736,
"repo_name": "comick/barduino",
"id": "2dff9a6554430c9a6a707677e7244736aa327878",
"size": "1603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/weblib/logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "61962"
},
{
"name": "Python",
"bytes": "48678"
},
{
"name": "Shell",
"bytes": "118"
}
],
"symlink_target": ""
} |
import pytest
from mock import Mock, patch
from meetingbot.meetings import GoToMeeting
from .fixtures import event, attendees, room, organizer
class TestGoToMeeting:
@pytest.fixture
def gotomeeting(self, event):
return GoToMeeting(event.description)
def test_can_pull_meeting_id_from_description(self, gotomeeting):
assert gotomeeting.id == "MEETING_ID"
def test_link_ends_with_id(self):
gotomeeting = GoToMeeting(meeting_description="")
fake_id = "foo"
gotomeeting._id = fake_id
assert gotomeeting.url.endswith(fake_id)
@patch("webbrowser.open")
def test_opening_link_includes_url(self, open_browser_mock):
gtm = GoToMeeting("")
gtm._id = "foo"
gtm.join()
open_browser_mock.assert_called_once_with(gtm.url)
def test_join_instructions(self, gotomeeting):
assert gotomeeting.url in gotomeeting.join_instructions
| {
"content_hash": "8594a33ce30135896d6051b2b50e4c23",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 30.161290322580644,
"alnum_prop": 0.6844919786096256,
"repo_name": "kevinlondon/meetingbot",
"id": "fce2f38df8a8430a868737d8ee773d728c945f00",
"size": "935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_meetings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24736"
}
],
"symlink_target": ""
} |
"""
URLConf for Django user profile management.
Recommended usage is to use a call to ``include()`` in your project's
root URLConf to include this URLConf for any URL beginning with
'/profiles/'.
"""
from django.conf.urls.defaults import *
from profiles import views
urlpatterns = patterns('',
url(r'^create/$',
views.create_profile,
name='profiles_create_profile'),
url(r'^edit/$',
views.edit_profile,
name='profiles_edit_profile'),
url(r'^(?P<username>\w+)/$',
views.profile_detail,
name='profiles_profile_detail'),
url(r'^$',
views.profile_list,
name='profiles_profile_list'),
)
| {
"content_hash": "8deaa2b18e9b030669514e0fec54157d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 69,
"avg_line_length": 32.82142857142857,
"alnum_prop": 0.4602829162132753,
"repo_name": "clones/django-profiles",
"id": "2dd28b8bd4055aaf46910d958e6c3d39549968f8",
"size": "919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13413"
}
],
"symlink_target": ""
} |
import sys, os, json, re, fnmatch, subprocess
from os.path import isfile, join
def log(level, message):
print "[" + level + "] " + message
def collect_files(path, pattern):
files = [join(path, f) for f in os.listdir(path) if isfile(join(path, f)) ]
files = fnmatch.filter(files, "*." + pattern)
return map(lambda x : os.path.splitext(x)[0], files)
class TestRunner:
def __init__(self, verbose):
self.testcases = None
self.conf = None
self.verbose = verbose
def load_config(self, root, json_path):
if os.path.exists(json_path) == False:
log("warning", json_path + " is not exists")
return False
f = open(json_path, "r")
conf = json.load(f)
ipattern = conf["input.ext"]
opattern = conf["output.ext"]
if conf["peg"] == None:
conf["peg"] = ""
if ipattern == None:
log("warning", "input.ext is not defined")
return False
if opattern == None:
log("info", "output.ext is not defined. use " + ipattern + ".out")
conf["output.ext"] = ipattern + ".out"
conf["peg"] = conf["peg"].replace("#{SOURCE_ROOT}", root)
self.conf = conf
return True
def prepare(self, root, path):
json_path = path + "/run.json"
if self.load_config(root, json_path) == False:
return False
inputs = collect_files(path, self.conf["input.ext"])
inputs.sort();
for f in inputs:
result = f + "." + self.conf["output.ext"]
if not(os.path.exists(result)):
log("warning", result + " is not found")
return False
self.testcases = inputs
return True
def run_once(self, root, peg, input_file):
cmd = ['java', '-ea', '-jar', root + '/libbun2.jar',
'-l', peg, input_file]
print "# " + " ".join(cmd)
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
exit_status = p.wait()
output = "".join(p.stdout.readlines())
errors = "".join(p.stderr.readlines())
return (exit_status, output, errors)
def run(self):
executed = 0
passed = 0
failed_file_names = []
if len(self.testcases) == 0:
return
for f in self.testcases:
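            # A testcase passes when the process exits with status 0 and its stdout matches the expected output file.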
input_file = f + "." + self.conf["input.ext"]
output_file = f + "." + self.conf["output.ext"]
syntax_file = self.conf["peg"]
            (status, output, errors) = self.run_once(root, syntax_file, input_file)
#if self.verbose:
# print 'return: %d' % (status)
# print 'stdout: %s' % (output, )
# print 'stderr: %s' % (errors, )
executed += 1
expected = open(output_file, "r").read()
if status == 0 and expected == output:
passed += 1
else:
failed_file_names.append(input_file)
if self.verbose:
print '-----------------------'
print 'result:\n%s' % (output)
print '-----------------------'
print 'expected\n%s' % (expected)
print '-----------------------'
return executed, passed, failed_file_names
def printTestResult(executed, passed, failed_file_names):
print "# of testcases: %d, # of OK: %d, # of FAILED: %d" % (
executed, passed, executed - passed)
if executed - passed > 0:
print '\nFAILED File:'
for name in failed_file_names:
print " %s" % (name)
if __name__ == '__main__':
root = os.path.abspath(os.path.dirname(__file__) + "/../../")
dirs = filter(lambda x : x != "tool", os.listdir(root + "/test-peg/"))
target = ""
if len(sys.argv) > 1:
target = sys.argv[1]
dirs = filter(lambda x : x == target, dirs)
executed = 0
passed = 0
failed_file_names = []
for path in dirs:
runner = TestRunner(True)
if runner.prepare(root, root + "/test-peg/" + path):
each_executed, each_passed, each_failed_file_names = runner.run()
executed += each_executed
passed += each_passed
failed_file_names += each_failed_file_names
printTestResult(executed, passed, failed_file_names)
| {
"content_hash": "734cc731f6f6e541fe6242767279afe0",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 82,
"avg_line_length": 35.968,
"alnum_prop": 0.5080071174377224,
"repo_name": "libbun/libbun2",
"id": "fdfbecbd00bd149d7bf4a944bedecffd886dbd2d",
"size": "4519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test-peg/tool/testrunner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5320"
},
{
"name": "Java",
"bytes": "332093"
},
{
"name": "JavaScript",
"bytes": "3897"
},
{
"name": "Python",
"bytes": "6048"
},
{
"name": "Shell",
"bytes": "1525"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe
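# Patch: copy the icon of the extended standard Desk Page onto customized, user-specific pages.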
def execute():
if not frappe.db.exists('Desk Page'): return
pages = frappe.get_all("Desk Page", filters={ "is_standard": False }, fields=["name", "extends", "for_user"])
default_icon = {}
for page in pages:
if page.extends and page.for_user:
if not default_icon.get(page.extends):
default_icon[page.extends] = frappe.db.get_value("Desk Page", page.extends, "icon")
icon = default_icon.get(page.extends)
		frappe.db.set_value("Desk Page", page.name, "icon", icon)
| {
"content_hash": "cfa37eaf65adefb5035636175ba2f29d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 110,
"avg_line_length": 35.733333333333334,
"alnum_prop": 0.6884328358208955,
"repo_name": "saurabh6790/frappe",
"id": "93bf5c766e03a95e0c9cf90c2f1c49e4952d4cee",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/patches/v13_0/update_icons_in_customized_desk_pages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63276"
},
{
"name": "HTML",
"bytes": "218921"
},
{
"name": "JavaScript",
"bytes": "2152738"
},
{
"name": "Less",
"bytes": "36947"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3261616"
},
{
"name": "SCSS",
"bytes": "223084"
},
{
"name": "Shell",
"bytes": "3358"
},
{
"name": "Vue",
"bytes": "49860"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^bb/', include('bb.urls')),
url(r'^admin/', include(admin.site.urls))
]
| {
"content_hash": "84a4d2dad68340f84c19cbdab11de514",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 45,
"avg_line_length": 26,
"alnum_prop": 0.6648351648351648,
"repo_name": "yemmitt/bb_website",
"id": "27d29b48938f05ca128224bcf8ab331e7f8ffe4a",
"size": "182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bb_django/mysite/mysite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "367"
},
{
"name": "HTML",
"bytes": "4519"
},
{
"name": "Python",
"bytes": "42460"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SecurityCenterConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for SecurityCenter.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Azure subscription ID. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2022-03-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(SecurityCenterConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2022-03-01") # type: Literal["2022-03-01"]
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-security/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
| {
"content_hash": "ddd2a703f91da43dc437bd18289edc2d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 107,
"avg_line_length": 51.375,
"alnum_prop": 0.7177615571776156,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f46270fe370c4b17c0d9d3167dfb4e9911fc2b04",
"size": "3756",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/security/azure-mgmt-security/azure/mgmt/security/v2022_03_01/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Unit tests for scrabble.py"""
import unittest
import sys
import os
current_dir = os.getcwd()
src_dir = os.path.join(current_dir, 'scrabble')
tests_dir = os.path.join(current_dir, 'tests')
# add the source directory to the load path
sys.path.append(src_dir)
import scrabble
class TestScrabble(unittest.TestCase):
def setUp(self):
self.words = scrabble.build_wordlist(os.path.join(tests_dir,
'test_wordlist.txt'))
self.letters = 'dogcatrbbit'
def test_wordlist(self):
correct_words = ['dog', 'cat', 'rabbit']
self.assertEquals(self.words, correct_words)
def test_highest(self):
self.assertEquals(scrabble.find_highest(self.letters,
self.words), 'rabbit')
def test_wordscore(self):
self.assertEquals(scrabble.get_word_score('faze'), 16)
self.assertEquals(scrabble.get_word_score('fiz'), 15)
self.assertEquals(scrabble.get_word_score('ben'), 5)
def test_creatable(self):
self.assertTrue(scrabble.creatable('hat', 'aahhtt'))
self.assertFalse(scrabble.creatable('noon', 'nott'))
def test_all_scores(self):
ans = [('rabbit', 10), ('dog', 5), ('cat', 5)]
self.assertEquals(scrabble.all_scores(self.letters, self.words), ans)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "0c28759563c0f84516a8c595c6a16581",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.6065340909090909,
"repo_name": "benosment/scrabble",
"id": "e30bf8e00615f39e87c0b768a55d87be28a6e925",
"size": "1474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_scrabble.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1225"
}
],
"symlink_target": ""
} |
import unittest
from Crypto.Util import strxor
import stringlib
def solve(crypt_msg):
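    # Brute-force single-byte XOR: try every possible key byte and keep the
    # plaintext with the highest stringlib.score (presumably an English-likeness heuristic).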
max_score, decoder, secret = 0, None, None
unhex_msg = stringlib.decode_hex(crypt_msg)
for c in range(256):
dec_msg = strxor.strxor_c(unhex_msg, c)
score = stringlib.score(dec_msg)
if score > max_score:
max_score, decoder, secret = score, c, dec_msg
return secret, max_score, decoder
class Test(unittest.TestCase):
def test_solve(self):
test_in = b"1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
test_out = b"Cooking MC's like a pound of bacon"
self.assertEqual(solve(test_in)[0], test_out)
| {
"content_hash": "5d9b23055b638d94a064833aa07f4a16",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 89,
"avg_line_length": 28.625,
"alnum_prop": 0.6724890829694323,
"repo_name": "Renelvon/matasano",
"id": "bc9d64028c4d191d30aeea1827efba5e20df8d31",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17149"
}
],
"symlink_target": ""
} |
"""
pyQms
-----
Python module for fast and accurate mass spectrometry data quantification
:license: MIT, see LICENSE.txt for more details
Authors:
* Leufken, J.
* Niehues, A.
* Sarin, L.P.
* Hippler, M.
* Leidel, S.A.
* Fufezan, C.
"""
import pyqms
import sys
import pickle
import os
import pyqms.adaptors
try:
import pymzml
except:
print("Please install pymzML via: pip install pymzml")
def main(ident_file=None, mzml_file=None):
"""
    Script to automatically parse `Ursgal`_ result files and quantify them via
    pyQms. Please refer to the documentation of :doc:`adaptors` for further
    information.
`Ursgal`_ result files or files in `mzTab` format are read in and used for
quantification of the BSA example file.
Note:
Use e.g. the BSA1.mzML example file. Please download it first using
'get_example_BSA_file.py'. Evidence files can also be found in the
data folder 'BSA1_omssa_2_1_9_unified.csv' or 'BSA1_omssa_2_1_9.mztab'
Usage:
./parse_ident_file_and_quantify.py <ident_file> <mzml_file>
.. _Ursgal:
https://github.com/ursgal/ursgal
.. _mzTab:
http://www.psidev.info/mztab
"""
if ident_file.upper().endswith("MZTAB"):
evidence_score_field = "search_engine_score[1]"
else:
# this is the default value in the adaptor
evidence_score_field = "PEP"
print('Evidence score field "{0}" will be used.'.format(evidence_score_field))
fixed_labels, evidences, molecules = pyqms.adaptors.parse_evidence(
fixed_labels=None,
evidence_files=[ident_file],
evidence_score_field=evidence_score_field,
)
params = {
"molecules": molecules,
"charges": [1, 2, 3, 4, 5],
"metabolic_labels": {"15N": [0]},
"fixed_labels": fixed_labels,
"verbose": True,
"evidences": evidences,
}
lib = pyqms.IsotopologueLibrary(**params)
run = pymzml.run.Reader(mzml_file)
out_folder = os.path.dirname(mzml_file)
mzml_file_basename = os.path.basename(mzml_file)
results = None
for spectrum in run:
try:
# pymzML 2.0.0 style
scan_time = spectrum.scan_time
except:
# scan time will be in seconds
scan_time = spectrum.get("MS:1000016")
if spectrum["ms level"] == 1:
results = lib.match_all(
mz_i_list=spectrum.centroidedPeaks,
file_name=mzml_file_basename,
spec_id=spectrum["id"],
spec_rt=scan_time,
results=results,
)
pickle.dump(
results,
open(
os.path.join(
out_folder, "{0}_pyQms_results.pkl".format(mzml_file_basename)
),
"wb",
),
)
return
if __name__ == "__main__":
if len(sys.argv) < 3:
print(main.__doc__)
else:
main(ident_file=sys.argv[1], mzml_file=sys.argv[2])
| {
"content_hash": "926953660526f97c6fe894a7c2eb5180",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 82,
"avg_line_length": 25.838983050847457,
"alnum_prop": 0.5762545096753033,
"repo_name": "pyQms/pyqms",
"id": "bea3e660b99c8d20607e7eefb1040ee9317eeb8b",
"size": "3090",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "example_scripts/parse_ident_file_and_quantify.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "353309"
},
{
"name": "Shell",
"bytes": "438"
}
],
"symlink_target": ""
} |
from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
@dataclass
class ChrFScorerConfig(FairseqDataclass):
pass
@register_scorer("chrf", dataclass=ChrFScorerConfig)
class ChrFScorer(BaseScorer):
def __init__(self, args):
super(ChrFScorer, self).__init__(args)
import sacrebleu
self.sacrebleu = sacrebleu
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
def score(self, order=4):
return self.result_string(order).score
def result_string(self, order=4):
if order != 4:
raise NotImplementedError
return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format()
| {
"content_hash": "cf2388571c580d029852bfc11ecb94c5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 73,
"avg_line_length": 25.833333333333332,
"alnum_prop": 0.6851612903225807,
"repo_name": "pytorch/fairseq",
"id": "5df5a1c011243fe2e836c38a5f8459aeb824f0e7",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fairseq/scoring/chrf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
} |
import argparse
import rank_serializer as RS
import bit_serializer as bs
import min_space_v2 as min_space
import decode_to_list as szhang_deserialize
# import min_space
ds = RS.DiffByteRankSerializer()
def main(args):
count = 0
print('RAW, Msgpack, DiffBytes, DiffBits, DiffBitsNoZip, Min')
with open(args.rank_data_file) as f:
last_key, buffer = None, []
for line in f:
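            # Lines starting with '0000' begin a new record: chars [0:28] hold the row key,
            # the remainder is serialized rank data; following lines are continuations of that record.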
if line.startswith('0000'):
count += 1
if buffer and last_key:
L = ds.deserialize(''.join(buffer))
# print(L)
s1 = len(ds.serialize(L))
s2 = len(bs.serialize(L))
s3 = len(bs.serialize(L, zip=False))
s4_result = min_space.serialize(L, zip=False)
s4 = len(s4_result)
decode_s4_result = szhang_deserialize.deserialize(s4_result, zip=False)
assert L == decode_s4_result, 'szhang decode error L:%s != decode_s4_result:%s' % (L, decode_s4_result)
if s4 and s4 == min(s1, s2, s3, s4):
flag = 'yeah!'
else:
flag = 'no'
print('{}, {}, {}, {}, {}'.format(s1, s2, s3, s4, flag))
if args.limit and count >= args.limit:
exit(0)
buffer = []
last_key = line[:28]
buffer.append(line[29:])
else:
buffer.append(line)
print 'total count:', count
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--rank-data-file', type=str)
parser.add_argument('-n', '--limit', type=int, default=0)
args = parser.parse_args()
main(args)
| {
"content_hash": "a3765a11ddb41450e63184a693c3880b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 124,
"avg_line_length": 36.56,
"alnum_prop": 0.4934354485776805,
"repo_name": "szhang-aa/hbase_save_store",
"id": "293f6f1d30039be2ed39fc74941aace181de5cd7",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26035"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponse
import json,datetime
from apps.message.models import News
from apps.game.models import Game,PlayerGameProfile,TeamGameProfile
from apps.team.models import Team,TeamProfile,Player,PlayerProfile
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login
from operator import attrgetter
# Create your views here.
def welcome(request):
news = News.objects.all().order_by("-create_time")[0:5]
today = datetime.date.today()
next_day = today+ datetime.timedelta(days=1)
prev_day = today - datetime.timedelta(days=1)
today_game = Game.objects.filter(game_date=today.strftime("%Y-%m-%d")).order_by("game_time")
next_day_game = Game.objects.filter(game_date=next_day.strftime("%Y-%m-%d")).order_by("game_time")
prev_day_game = Game.objects.filter(game_date=prev_day.strftime("%Y-%m-%d")).order_by("game_time")
teamsProfile = TeamProfile.objects.all().order_by("-win_rate","-game")[0:5]
teams = getModelByIdCode(teamsProfile,"TeamProfile")
querySet = PlayerProfile.objects.all()
pointPlayer = getModelByIdCode(sorted(querySet,key=lambda s:s.avg_point,reverse=True)[0:5],"PlayerProfile","point")
assistPlayer = getModelByIdCode(sorted(querySet,key=lambda s:s.avg_assist,reverse=True)[0:5],"PlayerProfile","assist")
reboundPlayer = getModelByIdCode(sorted(querySet,key=lambda s:s.avg_rebound,reverse=True)[0:5],"PlayerProfile","rebound")
blockPlayer = getModelByIdCode(sorted(querySet,key=lambda s:s.avg_block,reverse=True)[0:5],"PlayerProfile","block")
stealPlayer = getModelByIdCode(sorted(querySet,key=lambda s:s.avg_steal,reverse=True)[0:5],"PlayerProfile","steal")
return render(request,"index.html",{
"todayGame":today_game,
"nextDayGame":next_day_game,
"prevDayGame":prev_day_game,
"news":news,
"teams":teams,
"pointPlayer":pointPlayer,
"assistPlayer":assistPlayer,
"reboundPlayer":reboundPlayer,
"blockPlayer":blockPlayer,
"stealPlayer":stealPlayer
})
def getModelByIdCode(objlist,modelType,sortType=None):
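    # Resolve profile rows to their Team/Player models via id_code and attach the ranking stats (win rate / per-game averages).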
result = []
if len(objlist)==0:
return result
if modelType == 'TeamProfile':
for each in objlist:
team = Team.objects.get(id_code=each.id_code)
team.win_rate = each.win_rate
team.game = each.game
result.append(team)
elif modelType == 'PlayerProfile':
for each in objlist:
player = Player.objects.get(id_code=each.id_code)
if each.game != 0 :
setattr(player,sortType,getattr(each,sortType)/each.game)
result.append(player)
    return result
| {
"content_hash": "53fcdaaf1f4832d93bfb83503124ec8a",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 125,
"avg_line_length": 48.08620689655172,
"alnum_prop": 0.6715668698458229,
"repo_name": "zWingz/webbasketball",
"id": "eeeededb969c33da40af5b4c0a712322683d2ba0",
"size": "2789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/welcome/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50498"
},
{
"name": "HTML",
"bytes": "170566"
},
{
"name": "JavaScript",
"bytes": "30741"
},
{
"name": "Python",
"bytes": "106971"
}
],
"symlink_target": ""
} |