# python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represents an index on a Model."""
from typing import List, Optional
from spanner_orm import error
class Index(object):
"""Represents an index on a Model."""
PRIMARY_INDEX = 'PRIMARY_KEY'
def __init__(self,
columns: List[str],
parent: Optional[str] = None,
null_filtered: bool = False,
unique: bool = False,
storing_columns: Optional[List[str]] = None):
if not columns:
raise error.ValidationError('An index must have at least one column')
self.columns = columns
self.name = None
self.parent = parent
self.null_filtered = null_filtered
self.unique = unique
self.storing_columns = storing_columns or []
@property
def primary(self) -> bool:
return self.name == self.PRIMARY_INDEX
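
# Illustrative usage (not part of the original module; the column names below are
# hypothetical). A Model definition would typically attach instances like these as
# secondary indexes:
if __name__ == '__main__':
    by_email = Index(['email'], unique=True)
    by_account = Index(['account_id', 'created_at'],
                       null_filtered=True,
                       storing_columns=['display_name'])
    print(by_email.primary)      # False; only the PRIMARY_KEY index is primary
    print(by_account.columns)    # ['account_id', 'created_at']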
|
'''
One-off script to look at embedding results for a set of sampled crops
'''
import argparse, os, pickle, random, sys, time
import numpy as np
import torch
import scipy.io
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
sys.path.append("..")
from DL.utils import *
from DL.networks import *
from Database.DB_models import *
from DL.sqlite_data_loader import SQLDataLoader
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--db_name', default='missouricameratraps', type=str, help='Name of the training (target) data Postgres DB.')
parser.add_argument('--db_user', default='user', type=str, help='Name of the user accessing the Postgres DB.')
parser.add_argument('--db_password', default='password', type=str, help='Password of the user accessing the Postgres DB.')
parser.add_argument('--num', default=2500, type=int, help='Number of samples to draw from dataset to get embedding features.')
parser.add_argument('--crop_dir', type=str, help='Path to directory with cropped images to get embedding features for.')
parser.add_argument('--base_model', type=str, help='Path to latest embedding model checkpoint.')
parser.add_argument('--random_seed', default=1234, type=int, help='Random seed to get same samples from database.')
parser.add_argument('--output_dir', type=str, help='Output directory for subset of crops')
args = parser.parse_args()
random.seed(args.random_seed)
np.random.seed(args.random_seed)
BASE_MODEL = args.base_model
DB_NAME = args.db_name
USER = args.db_user
PASSWORD = args.db_password
# Connect to database and sample a dataset
target_db = PostgresqlDatabase(DB_NAME, user=USER, password=PASSWORD, host='localhost')
target_db.connect(reuse_if_open=True)
db_proxy.initialize(target_db)
dataset_query = Detection.select(Detection.image_id, Oracle.label, Detection.kind).join(Oracle).limit(args.num)
dataset = SQLDataLoader(args.crop_dir, query=dataset_query, is_training=False, kind=DetectionKind.ModelDetection.value, num_workers=8, limit=args.num)
imagepaths = dataset.getallpaths()
# Load the saved embedding model from the checkpoint
checkpoint = load_checkpoint(BASE_MODEL)
if checkpoint['loss_type'].lower() == 'center' or checkpoint['loss_type'].lower() == 'softmax':
embedding_net = SoftmaxNet(checkpoint['arch'], checkpoint['feat_dim'], checkpoint['num_classes'], False)
else:
embedding_net = NormalizedEmbeddingNet(checkpoint['arch'], checkpoint['feat_dim'], False)
model = torch.nn.DataParallel(embedding_net).cuda()
model.load_state_dict(checkpoint['state_dict'])
# Update the dataset embedding
dataset.updateEmbedding(model)
X_train = dataset.em[range(len(dataset))]
y_train = np.asarray(dataset.getalllabels())
imagepaths = dataset.getallpaths()
datasetindices = list(range(len(dataset)))
sample_features = np.array([]).reshape(0, 256)
sample_labels = []
sample_images = []
for idx in datasetindices:
sample_features = np.vstack([sample_features, X_train[idx]])
sample_labels.append(y_train[idx])
img_path = imagepaths[idx].split('.JPG')[0]
image = dataset.loader(img_path)
sample_images.append(image)
# save the images
for idx in datasetindices:
img_path = imagepaths[idx].split('.JPG')[0]
image = dataset.loader(img_path)
os.makedirs(os.path.join(args.output_dir, 'crops'), exist_ok=True)
image.save(os.path.join(args.output_dir, 'crops', '%d.JPG'%idx))
# save the features
# with open(os.path.join(args.output_dir, 'lastlayer_features.mat'), 'wb') as f:
# pickle.dump(sample_features, f)
# with open(os.path.join(args.output_dir, 'labels.mat'), 'wb') as f:
# pickle.dump(sample_labels, f)
with open(os.path.join(args.output_dir, 'lastlayer_features_and_labels.mat'), 'wb') as f:
scipy.io.savemat(f, mdict={'features': sample_features, 'labels': sample_labels})
if __name__ == '__main__':
main() |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/Subscription) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .backboneelement import BackboneElement
from .contactpoint import ContactPoint
from .domainresource import DomainResource
from .fhirdate import FHIRDate
@dataclass
class SubscriptionChannel(BackboneElement):
""" The channel on which to report matches to the criteria.
Details where to send notifications when resources are received that meet
the criteria.
"""
resource_type: ClassVar[str] = "SubscriptionChannel"
type: str = None
endpoint: Optional[str] = None
payload: Optional[str] = None
header: Optional[List[str]] = None
@dataclass
class Subscription(DomainResource):
""" Server push subscription criteria.
The subscription resource is used to define a push-based subscription from
a server to another system. Once a subscription is registered with the
server, the server checks every resource that is created or updated, and if
the resource matches the given criteria, it sends a message on the defined
"channel" so that another system can take an appropriate action.
"""
resource_type: ClassVar[str] = "Subscription"
status: str = None
contact: Optional[List[ContactPoint]] = None
end: Optional[FHIRDate] = None
reason: str = None
criteria: str = None
error: Optional[str] = None
channel: SubscriptionChannel = None |
#!/bin/false
# Copyright (c) 2022 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
from typing import Any, Dict
from sidein.providers.DependencyProviderInterface import DependencyProviderInterface
class SimpleContainerInterface(DependencyProviderInterface, metaclass=abc.ABCMeta):
"""
A dependency provider implementation which stores dependencies in an internal dictionary.
"""
# NOTE: As you might've noticed, there is no dependency_exists() method in this interface. The reason for that is
# that it would encourage its thread-unsafe use. Consider the following code:
#
# if container.dependency_exists(name):
# container.remove_dependency(name)
#
# If two threads executed this piece of code at the same time, a race condition could occur (while the methods
# exported by this interface are guaranteed to be thread-safe, this doesn't mean they cannot be used wrongly).
# For this reason, it's reasonable to think of a better, thread-safe method of achieving your goal - a method which
# doesn't check the presence of a dependency at all; for example, instead of the above code snippet, you could
# write this:
#
# try:
# container.remove_dependency(name) # This method *itself* is guaranteed to be thread-safe
# except DependencyProviderException:
# pass
__slots__ = ()
@abc.abstractmethod
def get_dependency(self, name: str) -> Any:
"""
Returns the dependency named 'name' from the dependency container.
:param name: The requested dependency's name.
:return: The dependency named 'name'.
:raises DependencyInSCNotFoundException: If the requested dependency isn't present in the dependency container. (DependencyInSCNotFoundException is a subclass of DependencyProviderException!)
"""
raise NotImplementedError(SimpleContainerInterface.get_dependency.__qualname__)
@abc.abstractmethod
def get_all_dependencies(self) -> Dict[str, Any]:
"""
Returns all the dependencies stored in the dependency container in a {name: dependency} dictionary.
:return: All the dependencies stored in the dependency container.
"""
raise NotImplementedError(SimpleContainerInterface.get_all_dependencies.__qualname__)
@abc.abstractmethod
def add_dependency(self, name: str, dependency: Any) -> None:
"""
Adds the dependency 'dependency' to the dependency container under the name 'name'.
:param name: The added dependency's name.
:param dependency: The added dependency.
:raises DependencyInSCExistsException: If the added dependency is already present in the dependency container.
"""
raise NotImplementedError(SimpleContainerInterface.add_dependency.__qualname__)
@abc.abstractmethod
def replace_dependency(self, name: str, dependency: Any) -> None:
"""
Replaces the already existing dependency named 'name' with the new dependency 'dependency' in the dependency container.
:param name: The replaced dependency's name.
:param dependency: The new dependency to replace the old dependency with.
:raises DependencyInSCNotFoundException: If the replaced dependency isn't present in the dependency container.
"""
raise NotImplementedError(SimpleContainerInterface.replace_dependency.__qualname__)
@abc.abstractmethod
def add_or_replace_dependency(self, name: str, dependency: Any) -> bool:
"""
Adds the dependency 'dependency' to the dependency container under the name 'name', if it isn't present there.
Otherwise, the old dependency is replaced with the new one.
:param name: The added or replaced dependency's name.
:param dependency: The new dependency to add or to replace the old dependency with.
:return: True if the dependency is replaced, False if it is added.
"""
raise NotImplementedError(SimpleContainerInterface.add_or_replace_dependency.__qualname__)
@abc.abstractmethod
def remove_dependency(self, name: str) -> None:
"""
Removes the dependency named 'name' from the dependency container.
:param name: The removed dependency's name.
:raises DependencyInSCNotFoundException: If the removed dependency isn't present in the dependency container.
"""
raise NotImplementedError(SimpleContainerInterface.remove_dependency.__qualname__)
@abc.abstractmethod
def remove_all_dependencies(self) -> None:
"""
Removes all the dependencies stored in the dependency container.
"""
raise NotImplementedError(SimpleContainerInterface.remove_all_dependencies.__qualname__)
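
# Rough illustrative sketch of what a concrete implementation of this interface
# could look like. This is NOT sidein's actual SimpleContainer: the lock-based
# thread safety merely mirrors the guarantees described above, and the local
# placeholder exceptions stand in for the real DependencyInSC*Exception classes,
# whose import paths are not shown in this file.
import threading


class _SketchDependencyNotFound(Exception):
    pass


class _SketchDependencyExists(Exception):
    pass


class _SketchSimpleContainer(SimpleContainerInterface):
    __slots__ = ("_lock", "_dependencies")

    def __init__(self):
        self._lock = threading.Lock()
        self._dependencies: Dict[str, Any] = {}

    def get_dependency(self, name: str) -> Any:
        with self._lock:
            try:
                return self._dependencies[name]
            except KeyError:
                raise _SketchDependencyNotFound(name) from None

    def get_all_dependencies(self) -> Dict[str, Any]:
        with self._lock:
            return dict(self._dependencies)

    def add_dependency(self, name: str, dependency: Any) -> None:
        with self._lock:
            if name in self._dependencies:
                raise _SketchDependencyExists(name)
            self._dependencies[name] = dependency

    def replace_dependency(self, name: str, dependency: Any) -> None:
        with self._lock:
            if name not in self._dependencies:
                raise _SketchDependencyNotFound(name)
            self._dependencies[name] = dependency

    def add_or_replace_dependency(self, name: str, dependency: Any) -> bool:
        with self._lock:
            replaced = name in self._dependencies
            self._dependencies[name] = dependency
            return replaced

    def remove_dependency(self, name: str) -> None:
        with self._lock:
            try:
                del self._dependencies[name]
            except KeyError:
                raise _SketchDependencyNotFound(name) from None

    def remove_all_dependencies(self) -> None:
        with self._lock:
            self._dependencies.clear()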
|
import shutil
from tempfile import mkdtemp, mktemp
import os
from zipfile import ZipFile
from AnkiTools.excel import AnkiExcelSync
class AnkiFormatEditor:
def __init__(self):
self.tempdir = mkdtemp()
def convert(self, in_file, out_file=None, out_format=None):
in_file_type = os.path.splitext(in_file)[1]
if out_format is None:
assert out_file is not None, "Either out_file or out_format must be specified."
out_file_type = os.path.splitext(out_file)[1]
else:
if out_format[0] == '.':
out_file_type = out_format
else:
out_file_type = '.' + out_format
if out_file is not None:
out_file_header = os.path.splitext(out_file)[0]
else:
out_file_header = os.path.splitext(in_file)[0]
out_file = '{}{}'.format(out_file_header, out_file_type)
assert in_file_type != out_file_type, 'File types must be different'
conversion = (in_file_type, out_file_type)
if conversion == ('.apkg', '.anki2'):
self.unzip(in_file, out_file=out_file)
elif conversion == ('.apkg', '.xlsx'):
self.export_anki_sqlite(self.unzip(in_file,
os.path.join(self.tempdir, mktemp())),
out_file)
elif conversion == ('.anki2', '.apkg'):
self.zip(in_file, out_file)
elif conversion == ('.anki2', '.xlsx'):
self.export_anki_sqlite(in_file, out_file)
elif conversion == ('.xlsx', '.anki2'):
self.import_anki_sqlite(in_file, out_file, out_path='')
elif conversion == ('.xlsx', '.apkg'):
self.zip(self.import_anki_sqlite(in_file), out_file)
else:
raise Exception("Unsupported conversion.")
def unzip(self, in_file, out_file):
with ZipFile(in_file) as zf:
zf.extract('collection.anki2', path=self.tempdir)
shutil.move(os.path.join(self.tempdir, 'collection.anki2'),
out_file)
return out_file
@staticmethod
def zip(in_file, out_file):
with ZipFile(out_file, 'w') as zf:
zf.write(in_file, arcname='collection.anki2')
zf.writestr('media', '{}')
@staticmethod
def export_anki_sqlite(in_file, out_file):
with AnkiExcelSync(anki_database=in_file, excel_filename=out_file) as sync_portal:
sync_portal.to_excel()
def import_anki_sqlite(self, in_file, out_file=None, out_path=''):
if out_file is None:
out_file = os.path.join(self.tempdir, 'collection.anki2')
with AnkiExcelSync(anki_database=out_file, excel_filename=in_file, read_only=True) as sync_portal:
sync_portal.to_sqlite()
return os.path.join(out_path, out_file)
def anki_convert(in_file, out_file=None, out_format=None, out_path=None):
AnkiFormatEditor().convert(in_file, out_file, out_format)
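
# Illustrative calls (hypothetical file names; the input decks are assumed to
# exist). The target format can be given either via an explicit out_file or via
# out_format:
if __name__ == '__main__':
    anki_convert('my_deck.apkg', out_format='xlsx')             # -> my_deck.xlsx
    anki_convert('my_deck.xlsx', out_file='my_new_deck.apkg')   # -> my_new_deck.apkg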
|
"""
ObjectiveGenerator constructs 3 types of optimization objectives
- Risk-only (Volatility, Variance, Skewness, Kurtosis, Higher Normalized Moments, Market Neutral)
- Risk Reward (Expected Return, Efficient Frontier, Sharpe, Sortino, Beta, Treynor, Jensen's Alpha)
- Numerical (Inverse Volatility, Variance, Equal Weight, Market Cap Weight)
"""
import numpy as np
from .Metrics import MetricGenerator
class ObjectiveGenerator(MetricGenerator):
def __init__(self, ret_vec, moment_mat, moment, assets, beta_vec):
"""
Initialize an ObjectiveGenerator class with parameters to construct objectives
Parameters are identical to its parent class MetricGenerator
"""
super().__init__(ret_vec, moment_mat, moment, assets, beta_vec)
self.method_dict = {"efficient_frontier": self.efficient_frontier,
"equal_risk_parity": self.equal_risk_parity,
"min_correlation": self.min_correlation,
"min_volatility": self.min_volatility,
"min_variance": self.min_moment,
"min_skew": self.min_moment,
"min_kurt": self.min_moment,
"min_moment": self.min_moment,
"max_return": self.max_return,
"max_diversification": self.max_diversification,
"max_sharpe": self.max_sharpe,
"min_beta": self.min_beta,
"max_treynor": self.max_treynor,
"max_jenson_alpha": self.max_jenson_alpha,
"inverse_volatility": self.inverse_volatility,
"inverse_variance": self.inverse_variance,
"equal_weight": self.equal_weight,
"market_cap_weight": self.market_cap_weight}
def create_objective(self, objective_type, **kwargs):
"""
Universal method for creating an objective
:param objective_type: str, options are listed in ObjectiveGenerator.method_dict
:param kwargs: arguments to be passed in to construct objectives
:return: func/np.ndarray (if weight construction is purely numerical, then return the weight vector, else return a function)
"""
if objective_type in ["equal_weight", "market_cap_weight", "inverse_volatility", "inverse_variance"]:
return self.method_dict[objective_type](**kwargs)
return self.method_dict[objective_type]
# Risk Related
def equal_risk_parity(self, w):
"""
        Objective: Each asset contributes an equal amount of risk to the portfolio
:param w: np.ndarray, weight vector
:return: float
"""
return self.risk_parity(w)
def min_correlation(self, w):
"""
Objective: Minimize Portfolio Correlation Factor
:param w: np.ndarray, weight vector
:return: float
"""
return self.correlation(w)
def min_volatility(self, w):
"""
Objective: Minimize Portfolio Volatility
:param w: np.ndarray, weight vector
:return: float
"""
return self.volatility(w)
def min_moment(self, w):
"""
Objective: Minimize Portfolio Moment (Variance if moment=2, Skewness if moment=3, Kurtosis if moment=4)
:param w: np.ndarray, weight vector
:return: float
"""
return self.higher_moment(w)
def max_diversification(self, w):
"""
Objective: Maximize Portfolio Diversification Factor
:param w: np.ndarray, weight vector
:return: float
"""
return -self.diversification(w)
def efficient_frontier(self, w, aversion):
"""
Objective: Maximize return with lowest variance (Classic Mean Variance Optimization)
:param w: np.ndarray, weight vector
:param aversion: float, risk aversion factor
:return: float
"""
return -(self.expected_return(w) - 0.5 * aversion * self.higher_moment(w))
def max_return(self, w):
"""
Objective: Maximize Return
:param w: np.ndarray, weight vector
:return: float
"""
return -self.expected_return(w)
def max_sharpe(self, w, risk_free):
"""
Objective: Maximize Sharpe
:param w: np.ndarray, weight vector
:param risk_free: float, risk free rate of return. Must be positive
:return: float
"""
return -self.sharpe(w, risk_free)
def min_beta(self, w):
"""
Objective: Minimize Absolute Beta (Close to 0)
:param w: np.ndarray, weight vector
:return: float
"""
return np.sqrt(np.square(self.beta(w)))
def max_treynor(self, w, risk_free):
"""
Objective: Maximize Treynor Ratio
:param w: np.ndarray, weight vector
:param risk_free: float, risk free rate of return
:return: float
"""
return -self.treynor(w, risk_free)
def max_jenson_alpha(self, w, risk_free, market_return):
"""
        Objective: Maximize Jensen's Alpha
:param w: np.ndarray, weight vector
:param risk_free: float, risk free rate of return
:param market_return: float, assumed market rate of return
:return: float
"""
return -self.jenson_alpha(w, risk_free, market_return)
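
# Rough usage sketch (not part of the original module). It assumes that for
# moment=2 the moment matrix is simply the covariance matrix of asset returns and
# that create_objective("min_volatility") hands back a callable of the weight
# vector; adapt to the actual MetricGenerator signature as needed.
if __name__ == "__main__":
    from scipy.optimize import minimize

    tickers = ["AAA", "BBB", "CCC"]
    rng = np.random.default_rng(0)
    rets = rng.normal(0.0005, 0.01, size=(250, len(tickers)))  # toy daily returns
    ret_vec = rets.mean(axis=0)
    cov_mat = np.cov(rets, rowvar=False)
    beta_vec = np.ones(len(tickers))                            # dummy betas

    gen = ObjectiveGenerator(ret_vec, cov_mat, 2, tickers, beta_vec)
    objective = gen.create_objective("min_volatility")

    w0 = np.ones(len(tickers)) / len(tickers)
    cons = ({"type": "eq", "fun": lambda w: w.sum() - 1.0},)    # fully invested
    res = minimize(objective, w0, bounds=[(0, 1)] * len(tickers), constraints=cons)
    print(res.x)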
|
from typing import Dict, Any
import json
import logging
import os
import boto3
from utils.parse_user_id import parse_user_id
from aws_xray_sdk.core import patch_all
patch_all()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb = boto3.resource('dynamodb')
def put_handler(event: Dict[str, Any], context):
data = json.loads(event['body'])
errors = []
    if 'title' not in data:
        errors.append("Missing required title field.")
    if 'year' not in data:
        errors.append("Missing required year field.")
    if 'rating' not in data:
        errors.append("Missing required rating field.")
if len(errors) > 0:
logging.error(f"Validation Failed for request: {event['body']}")
return {
"statusCode": 400,
"body": json.dumps({"errors": errors})
}
headers = event['headers']
user_id = parse_user_id(headers['authorization'][len('Bearer '):])
    # 'plot' is not validated above, so treat it as optional
    movie_item = put_movie(user_id, data['title'], int(data['year']), data.get('plot', ''), int(data['rating']))
# logging.info(f"Add {json.dumps(item)}")
# write the todo to the database
# response = table.put_item(Item=item, ReturnValues='ALL_OLD')
# logging.info(f"PutItem response: {response['ResponseMetadata'].keys()}")
# PutItem response: dict_keys(['RequestId', 'HTTPStatusCode', 'HTTPHeaders', 'RetryAttempts']
# create a response
response = {
"statusCode": 200,
"body": json.dumps({'item': movie_item})
}
return response
def put_movie(user_id, title, year, plot, rating): # , dynamodb=None):
# if not dynamodb:
# dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
table = dynamodb.Table(os.environ['MOVIES_DYNAMODB_TABLE'])
movie_item = {
'title': title,
'user_id': user_id,
'info': {
'year': year,
'plot': plot,
'rating': rating
}
}
_ = table.put_item(
Item=movie_item
)
return movie_item
# if __name__ == '__main__':
# movie_resp = put_movie("The Big New Movie", 2015,
# "Nothing happens at all.", 0)
# print("Put movie succeeded:")
# pprint(movie_resp, sort_dicts=False)
|
# -*- coding: utf-8 -*-
"""
**views.jobs**
================
This module contains the jobs function that handles requests for the jobs
endpoint of the mycroft service.
"""
from simplejson.decoder import JSONDecodeError
from staticconf import read_string
from pyramid.view import view_config
from mycroft.logic.job_actions import list_all_jobs
from mycroft.logic.job_actions import list_jobs_by_name
from mycroft.logic.job_actions import list_jobs_by_name_version
from mycroft.logic.job_actions import post_job
from mycroft.logic.job_actions import put_job
from mycroft.models.aws_connections import TableConnection
from mycroft.models.abstract_records import PrimaryKeyError
from mycroft.backend.sqs_wrapper import SQSWrapper
def get_scanner_queue(etl_type):
"""
    Return the scanner SQS queue that the jobs endpoint uses to send a wake-up
    message to the scanner when a job is posted
:param etl_type: et or load
:type etl_type: string in ['et', 'load']
"""
return SQSWrapper(read_string("sqs.{0}_scanner_queue_name".format(etl_type)))
@view_config(route_name='api.jobs', request_method='GET', renderer='json_with_status')
@view_config(route_name='api.jobs', request_method='POST', renderer='json_with_status')
def jobs(request):
"""
    jobs handles GET and POST requests for the jobs endpoint: GET lists all
    scheduled jobs stored in dynamo, POST creates a new job
**GET /v1/jobs/**
Example: ``/v1/jobs/``
*Example Response* ::
[
{'log_name': 'ad_click',
'log_schema_version': 'initial',
's3_log_uri': http://ad_click/schema.yaml?Signature=b?Expires=c?AccessKeyId=xxx
'start_date': '2014-05-01',
'end_date': '',
'contact_emails': ['[email protected]', '[email protected]'],
'redshift_id': 'abc123',
'additional_arguments': '{"et_step": ["--force-et"]}'
},
{'log_name': 'ad_click',
'log_schema_version': 'minimal',
's3_log_uri': http://ad_min/schema.yaml?Signature=b?Expires=b?AccessKeyId=yyy
'start_date': '2014-05-01',
'end_date': '2014-05-07',
'contact_emails': ['[email protected]', '[email protected]'],
'redshift_id': 'abc123'
'additional_arguments': '{"et_step": ["--force-et"]}'
},
{'log_name': 'bing_geocoder',
'log_schema_version': 'bing2',
's3_log_uri': http://bing/schema.yaml?Signature=b?Expires=a?AccessKeyId=zzz
'start_date': '2014-05-02',
'end_date': '2014-06-07',
'contact_emails': ['[email protected]', '[email protected]'],
'redshift_id': 'abc123'
'additional_arguments': '{"et_step": ["--force-et"]}'
}
]
============ ===========
Status Code Description
============ ===========
**200** Success
**500** unknown exception
============ ===========
* **Encoding type:** *application/json*
**POST /v1/jobs/**
Example: ``v1/jobs``
**Query Parameters:**
* **request.body** -- the json string of job details
*Example request.body* ::
"{ 'log_name': 'ad_click',
'log_schema_version': 'initial',
's3_log_uri': 'llll',
'start_date': '2014-04-01',
'end_date': '',
'contact_emails': ['[email protected]', '[email protected]'],
'redshift_id': 'rs1',
'additional_arguments': '{"load_step": ["--force-load"]}'
}"
============ ===========
Status Code Description
============ ===========
**200** Success
**400** bad hash_key: redshift_id, log_name,
log_schema_version and start_date must all be present
**404** invalid job parameters
**500** unknown exception
============ ===========
* **Encoding type:** *application/json*
"""
try:
if request.method == "POST":
return 200, post_job(TableConnection.get_connection('ScheduledJobs'),
get_scanner_queue('et'),
request.body)
elif request.method == "GET":
return 200, list_all_jobs(TableConnection.get_connection('ScheduledJobs'))
except PrimaryKeyError as e:
return 400, {'error': 'bad hash_key'}
except ValueError as e:
if "ConditionalCheckFailedException" in repr(e):
return 404, {'error': "ConditionalCheckFailed; possible duplicate job. \
Delete existing job first"}
return 404, {'error': repr(e)}
except Exception as unknown_exception:
return 500, {'error': repr(unknown_exception)}
@view_config(route_name='api.jobs_log_name', request_method='GET',
renderer='json_with_status')
@view_config(route_name='api.jobs_log_name_version', request_method='GET',
renderer='json_with_status')
def jobs_filtered(request):
"""
    jobs_filtered handles requests from the jobs endpoint with a log_name and
    optional version. If there's no version, all jobs for the given log_name
    will be returned; otherwise all jobs for the log name and version
    combination will be returned.
**GET /v1/jobs/**\ *{string: log_name}*
**Query Parameters:**
* **log_name** - the name of the log for which we want to see jobs
Example: ``/v1/jobs/ad_click``
*Example Response* ::
[
{'log_name': 'ad_click',
'log_schema_version': 'initial',
's3_log_uri': http://ad_click/schema.yaml?Signature=b?Expires=c?AccessKeyId=xxx
'start_date': '2014-05-01',
'end_date': '',
'contact_emails': ['[email protected]', '[email protected]'],
'redshift_id': 'abc123',
'additional_arguments': '{"load_step": ["--force-load"]}'
},
{'log_name': 'ad_click',
'log_schema_version': 'minimal',
's3_log_uri': http://ad_min/schema.yaml?Signature=b?Expires=b?AccessKeyId=yyy
'start_date': '2014-05-01',
'end_date': '2014-05-07',
'contact_emails': ['[email protected]', '[email protected]'],
'redshift_id': 'abc123',
'additional_arguments': '{"load_step": ["--force-load"]}'
}
]
============ ===========
Status Code Description
============ ===========
**200** Success
**404** invalid log_name
**500** unknown exception
============ ===========
**GET /v1/jobs/**\ *{string: log_name}/{string: log_schema_version}*
**Query Parameters:**
* **log_name** - the name of the log for which we want to see jobs
* **log_schema_version** - the version of the log for which we want to see jobs
Example: ``/v1/jobs/ad_click/initial``
*Example Response* ::
[
{'log_name': 'ad_click',
'log_schema_version': 'initial',
's3_log_uri': http://ad_click/schema.yaml?Signature=b?Expires=c?AccessKeyId=xxx
'start_date': '2014-05-01',
'end_date': '',
'emails': ['[email protected]', '[email protected]'],
'redshift_id': 'abc123',
'additional_arguments': '{"et_step": ["--force-et"]}'
}
]
============ ===========
Status Code Description
============ ===========
**200** Success
**404** invalid log_name or log_version
**500** unknown exception
============ ===========
* **Encoding type:** *application/json*
"""
log_name = request.matchdict.get('log_name')
log_version = request.matchdict.get('log_schema_version', None)
try:
if log_version is None:
return 200, list_jobs_by_name(log_name,
TableConnection.get_connection('ScheduledJobs'))
return 200, list_jobs_by_name_version(log_name,
log_version,
TableConnection.get_connection('ScheduledJobs'))
except ValueError as e:
return 404, {'error': repr(e)}
except Exception as unknown_exception:
return 500, {'error': repr(unknown_exception)}
@view_config(route_name='api.jobs_job_id', request_method='PUT', renderer='json_with_status')
def jobs_update_job(request):
"""
    jobs_update_job handles PUT requests from the jobs endpoint to update an
    existing job.
**PUT /v1/jobs/job/**
Example: ``v1/jobs/job/``
**Query Parameters:**
* **request.body** -- the json string of job details
*Example request.body* ::
"{ 'log_name': 'ad_click',
'log_schema_version': 'initial',
'start_date': '2014-04-01',
'end_date': '',
'redshift_id': 'rs1',
'cancel_requested': True,
}"
============ ===========
Status Code Description
============ ===========
**200** Success
**400** bad hash_key: redshift_id, log_name,
log_schema_version and start_date must all be present
**404** invalid job parameters
**500** unknown exception
============ ===========
* **Encoding type:** *application/json*
"""
try:
return 200, put_job(TableConnection.get_connection('ScheduledJobs'),
get_scanner_queue('et'),
request.body)
except PrimaryKeyError as e:
return 400, {'error': 'bad hash_key'}
except JSONDecodeError as e:
return 400, {'error': 'json decode error'}
except ValueError as e:
return 404, {'error': repr(e)}
except Exception as unknown_exception:
return 500, {'error': repr(unknown_exception)}
|
import math
def do_naive_bayes_prediction(x, observed_class_distribution: dict, splitters: dict):
"""Perform Naive Bayes prediction
Parameters
----------
x
The feature values.
observed_class_distribution
Observed class distribution.
splitters
Attribute (features) observers.
Returns
-------
The probabilities related to each class.
Notes
-----
This method is not intended to be used as a stand-alone method.
"""
total_weight = sum(observed_class_distribution.values())
if not observed_class_distribution or total_weight == 0:
# No observed class distributions, all classes equal
return None
votes = {}
for class_index, class_weight in observed_class_distribution.items():
# Prior
if class_weight > 0:
votes[class_index] = math.log(class_weight / total_weight)
else:
votes[class_index] = 0.0
continue
if splitters:
for att_idx in splitters:
if att_idx not in x:
continue
obs = splitters[att_idx]
# Prior plus the log likelihood
tmp = obs.cond_proba(x[att_idx], class_index)
votes[class_index] += math.log(tmp) if tmp > 0 else 0.0
# Max log-likelihood
max_ll = max(votes.values())
# Apply the log-sum-exp trick (https://stats.stackexchange.com/a/253319)
lse = max_ll + math.log(
sum(math.exp(log_proba - max_ll) for log_proba in votes.values())
)
for class_index in votes:
votes[class_index] = math.exp(votes[class_index] - lse)
return votes
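
# Tiny illustrative check (not part of the library). The stand-in splitter below
# only provides the cond_proba(att_val, class_idx) method, which is the sole
# interface this function relies on; real attribute observers (e.g. Gaussian
# estimators) would take its place.
if __name__ == "__main__":

    class _ToySplitter:
        def __init__(self, probas):
            # probas maps class_idx -> {attribute value: P(value | class)}
            self._probas = probas

        def cond_proba(self, att_val, class_idx):
            return self._probas[class_idx].get(att_val, 0.0)

    class_dist = {0: 6.0, 1: 4.0}  # observed class weights
    splitters = {"colour": _ToySplitter({0: {"red": 0.8, "blue": 0.2},
                                         1: {"red": 0.3, "blue": 0.7}})}
    # The prior favours class 0 and "red" is far more likely under class 0,
    # so the returned probabilities should clearly favour class 0.
    print(do_naive_bayes_prediction({"colour": "red"}, class_dist, splitters))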
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.0-rc1
# kernelspec:
# display_name: Python [conda env:b36]
# language: python
# name: conda-env-b36-py
# ---
# %% [markdown]
# # imports
# %%
from useful_scit.imps2.defs import *
# %%
from skimage.util.shape import view_as_windows as viewW
def strided_indexing_roll(a, r):
# Concatenate with sliced to cover all rolls
assert a.shape[0]==r.shape[0], 'roll vector does not match matrix shape'
p = np.full((a.shape[0],a.shape[1]-1),np.nan)
a_ext = np.concatenate((p,a,p),axis=1)
# Get sliding windows; use advanced-indexing to select appropriate ones
n = a.shape[1]
return viewW(a_ext,(1,n))[np.arange(len(r)), -r + (n-1),0]
# %% [markdown]
# # example
# %%
# %%
a = np.array([
[ 1, 2, 3],
[ 1, 2, 3],
[11,22,33],
[21,22,23]
])
r = np.array([1,2,3,4])
# %%
strided_indexing_roll(a,r)
# %%
viewW(a_ext, (1, 5))[0]  # scratch cell: a_ext only exists inside strided_indexing_roll, so this fails if run top-to-bottom
# %%
a.shape[1]==r.shape[0]
# %%
# %load_ext memory_profiler
# %% [markdown]
# # flx example
# %%
def f1():
f = '/Users/diego/flexpart_management/flexpart_management/notebooks/log_pol_revisited/data/flxout_d02_20171217_000000.nc'
d = '/Users/diego/flexpart_management/flexpart_management/notebooks/log_pol_revisited/data/header_d02.nc'
ds = xr.open_dataset(f)
hs = xr.open_dataset(d)
to = hs['TOPOGRAPHY']
shifts = np.round(to/500).astype(int)
co = ds['CONC']
co = co[{'Time':0,'releases':0,'ageclass':0}]
# co.sum('south_north').plot()
c1 = co.stack({'sw':['south_north','west_east']})
s1 = shifts.stack({'sw':['south_north','west_east']})
c1d = c1.values
s1d = s1.values
res = strided_indexing_roll(c1d.T,s1d).T
c1d.shape,res.shape
c2 = xr.full_like(c1,0.0) + res
c3 = c2.unstack()
c4 = c3.sum('south_north') - co.sum('south_north')
# c4.plot()
return c4
# c4.plot.contour()
# %%
# doesn't work
def f2():
f = '/Users/diego/flexpart_management/flexpart_management/notebooks/log_pol_revisited/data/flxout_d02_20171217_000000.nc'
d = '/Users/diego/flexpart_management/flexpart_management/notebooks/log_pol_revisited/data/header_d02.nc'
ds = xr.open_dataset(f)
hs = xr.open_dataset(d)
to = hs['TOPOGRAPHY']
shifts = np.floor(to/500).astype(int)
co = ds['CONC']
co = co[{'Time':slice(0,10),'releases':0,'ageclass':0}]
# co.sum('south_north').plot()
sdim = ['south_north','west_east','Time']
c1 = co.stack({'sw':sdim})
s1 = shifts.stack({'sw':sdim})
c1d = c1.values
s1d = s1.values
res = strided_indexing_roll(c1d.T,s1d).T
c1d.shape,res.shape
c2 = xr.full_like(c1,0.0) + res
c3 = c2.unstack()
# c4 = c3.sum('south_north') - co.sum('south_north')
# c4.plot()
    return c3  # c4 above is commented out, so return the reindexed dataset instead
# %%
# %load_ext line_profiler
# %%
# # %timeit c4 = f1()
# %lprun -f f1 c4 = f1()
# c4.plot()
# %% [markdown]
# # from previous
# %%
from flexpart_alto.modules import constants as co
# %%
def from_agl_to_asl(
ds ,
ds_var='conc_norm' ,
delta_z=500 ,
z_top=15000 ,
ds_var_name_out=None
) :
log.ger.warning(
f'this will only work if ds z levels are constant' )
import wrf
t_list = [ co.ZM , co.R_CENTER , co.TH_CENTER ]
d3d = ds[ ds_var ] # .sum( [ co.RL ] )
d3d_attrs = d3d.attrs
d3d = d3d.transpose(
co.RL , *t_list , transpose_coords=True
)
dz = d3d[ co.TOPO ] + np.round( d3d[ co.ZM ] / delta_z ) * delta_z
d3d = d3d.reset_coords( drop=True )
dz = dz.transpose( *t_list , transpose_coords=True )
dz = dz.reset_coords( drop=True )
# %%
# print( d3d.shape )
# print( dz.shape )
# %%
z_lev = np.arange( delta_z / 2 , z_top , delta_z )
da_interp = wrf.interplevel( d3d , dz , z_lev )
da_reinterp = da_interp.rename( level=co.ZM )
# %%
ds_chop = ds.isel( { co.ZM : slice( 0 , len( da_reinterp[ co.ZM ] ) ) } )
for coord in list( ds.coords ) :
da_reinterp = da_reinterp.assign_coords(
**{ coord : ds_chop[ coord ] } )
if ds_var_name_out is not None :
da_reinterp.name = ds_var_name_out
# we do this in order to avoid the problem of setting attributes
# to none that cannot be saved using to netcdf.
da_reinterp.attrs = d3d_attrs
ds_reinterp = da_reinterp.to_dataset()
# todo: check that concentrations are the same after resampling
return ds_reinterp
# %%
f = '/Users/diego/flexpart_management/flexpart_management/notebooks/log_pol_revisited/data/flxout_d02_20171217_000000.nc'
d = '/Users/diego/flexpart_management/flexpart_management/notebooks/log_pol_revisited/data/header_d02.nc'
ds = xr.open_dataset(f)
hs = xr.open_dataset(d)
# %%
ds
# %%
|
"""
Exemplary workflow using the py-fmas library code.
.. codeauthor:: Oliver Melchert <[email protected]>
"""
import fmas
import numpy as np
glob = fmas.data_io.read_h5("in_file.h5")
grid = fmas.grid.Grid(
t_max=glob.t_max, t_num=glob.t_num, z_max=glob.z_max, z_num=glob.z_num
)
model = fmas.models.FMAS_S_Raman(
w=grid.w, beta_w=glob.beta_w, n2=glob.n2, fR=glob.fR, tau1=glob.tau1, tau2=glob.tau2
)
ic = fmas.analytic_signal.AS(glob.E_0t)
def Cp(i, zi, w, uw):
Iw = np.abs(uw) ** 2
return np.sum(Iw[w > 0] / w[w > 0])
solver = fmas.solver.IFM_RK4IP(model.Lw, model.Nw, user_action=Cp)
solver.set_initial_condition(grid.w, ic.w_rep)
solver.propagate(z_range=glob.z_max, n_steps=glob.z_num, n_skip=glob.z_skip)
res = {"t": grid.t, "z": solver.z, "w": solver.w, "u": solver.utz, "Cp": solver.ua_vals}
fmas.data_io.save_h5("out_file.h5", **res)
fmas.tools.plot_evolution(
solver.z, grid.t, solver.utz, t_lim=(-500, 2200), w_lim=(1.0, 4.0)
)
|
# -*- coding: utf-8 -*-
from rest_framework import viewsets, mixins, decorators, response
from apps.contact import models
from . import serializers
class ContactViewSetMixin:
def get_queryset(self):
qs = super().get_queryset()
type = self.request.query_params.get('type', None)
if type:
qs = qs.filter(type=type)
return qs
class TitleViewSet(ContactViewSetMixin, viewsets.ModelViewSet):
queryset = models.Title.objects.all()
serializer_class = serializers.TitleSerializer
class ContactViewSet(ContactViewSetMixin, viewsets.ModelViewSet):
queryset = models.Contact.objects.all()
serializer_class = serializers.ContactSerializer
def get_queryset(self):
qs = super().get_queryset()
component = self.request.query_params.get('component', None)
if component:
qs = qs.filter(**{f'show_in_{component}': True})
return qs
@decorators.action(methods=['POST'], detail=False, url_path='me')
def contact_me(self, request):
serializer = serializers.ContactMeEntrySerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return response.Response(data=serializer.data)
@decorators.action(methods=['GET'], detail=False, url_path='photo')
def photo(self, request):
photo = models.Photo.objects.first()
url = photo.original.url if photo else ''
return response.Response(data={'url': url})
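
# Illustrative wiring (in a real Django project this registration would live in
# the app's urls.py rather than at the bottom of views.py): putting the viewsets
# on a DRF router exposes /titles/, /contacts/, /contacts/me/ and /contacts/photo/.
from rest_framework import routers

router = routers.DefaultRouter()
router.register('titles', TitleViewSet)
router.register('contacts', ContactViewSet)
urlpatterns = router.urls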
|
from typing import Generator, List, Dict
import pandas as pd
from gym import Space
import pytest
from tensortrade import TradingContext
from tensortrade.exchanges import Exchange
from tensortrade.trades import Trade
@pytest.mark.skip(reason="GAN exchange is not complete.")
def test_create_gan_exchange():
    """ GAN is not complete. Will not do this. """
pass |
print('=' * 5, 'EX_003', '=' * 5)
nome = input('Enter your name: ')
print('Welcome, {}!'.format(nome))
|
import os
from pydantic import BaseSettings
class Settings(BaseSettings):
class Config:
env_file = ".env"
app_name: str = "FastAPI Demo"
admin_email: str = "[email protected]"
secret_key: str = os.getenv("SECRET_KEY")
hash_algo: str = os.getenv("HASH_ALGO", "HS256")
access_token_expiration: int = os.getenv("ACCESS_TOKEN_EXPIRATION", 86400)
settings = Settings()
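
# Illustrative usage (assumes SECRET_KEY is supplied via the environment or the
# .env file, e.g. a line such as SECRET_KEY=change-me); being a BaseSettings
# subclass, Settings also reads those sources, not only the os.getenv defaults.
if __name__ == "__main__":
    print(settings.app_name, settings.hash_algo, settings.access_token_expiration)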
|
import sys, time
def readanswers():
try:
answers = {}
f = open('answers.txt')
try:
for line in f.readlines():
line = line.strip()
if line:
problem, answer = line.split(':')
answers[int(problem.strip())] = int(answer.strip())
finally:
f.close()
return answers
except Exception:
print 'Could not read file: answers.txt'
return {}
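
# For reference, readanswers() expects answers.txt to contain one
# "problem: answer" pair per line, both integers (placeholder values shown):
#
#   1: 42
#   7: 123456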
def main():
answers = readanswers()
problems = [int(arg) for arg in sys.argv[1:]] or sorted(answers)
format = '%10s %15s %15s %10s'
print format % ('PROBLEM', 'RESULT', 'CORRECTION', 'DURATION')
for problem in problems:
t = time.time()
try:
module = __import__(str(problem))
result = module.solve()
except Exception:
result = '...'
duration = '%.1f' % (time.time() - t)
correction = '...'
if problem in answers and result != answers[problem]:
correction = answers[problem]
print format % (problem, result, correction, duration)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from contextlib import suppress
from timeit import default_timer
from threading import Event, Thread, Lock
import os
import time
import sys
try:
from dask.callbacks import Callback
except ImportError as e:
opt_import_err = e
Callback = object
else:
opt_import_err = None
from africanus.util.docs import DefaultOut
from africanus.util.requirements import requires_optional
def format_time(t):
"""Format seconds into a human readable form."""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
w, d = divmod(d, 7)
if w:
return "{0:2.0f}w{1:2.0f}d".format(w, d)
elif d:
return "{0:2.0f}d{1:2.0f}h".format(d, h)
elif h:
return "{0:2.0f}h{1:2.0f}m".format(h, m)
elif m:
return "{0:2.0f}m{1:2.0f}s".format(m, s)
else:
return "{0:5.0f}s".format(s)
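
# Doctest-style examples of the buckets format_time falls into (values follow
# directly from the branches above):
#
#   >>> format_time(42)
#   '   42s'
#   >>> format_time(42 * 60)
#   '42m 0s'
#   >>> format_time(3 * 86400 + 7200)
#   ' 3d 2h'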
def key_bin(key):
if type(key) is tuple:
key = key[0]
if type(key) is bytes:
key = key.decode()
try:
return str(key)
except Exception:
return "other"
class TaskData(object):
__slots__ = ("total", "completed", "time_sum")
def __init__(self, completed=0, total=0, time_sum=0.0):
self.completed = completed
self.total = total
self.time_sum = time_sum
def __iadd__(self, other):
self.completed += other.completed
self.total += other.total
self.time_sum += other.time_sum
return self
def __add__(self, other):
return TaskData(self.completed + other.completed,
self.total + other.total,
self.time_sum + other.time_sum)
def __repr__(self):
return "TaskData(%s, %s, %s)" % (self.completed,
self.total,
self.time_sum)
__str__ = __repr__
def update_bar(elapsed, prev_completed, prev_estimated, pb):
total = 0
completed = 0
estimated = 0.0
time_guess = 0.0
# update
with pb._lock:
for k, v in pb.task_data.items():
total += v.total
completed += v.completed
if v.completed > 0:
avg_time = v.time_sum / v.completed
estimated += avg_time * v.total
time_guess += v.time_sum
# If we've completed some new tasks, update our estimate
# otherwise use previous estimate. This prevents jumps
# relative to the elapsed time
if completed != prev_completed:
estimated = estimated * elapsed / time_guess
else:
estimated = prev_estimated
# For the first 10 seconds, tell the user estimates improve over time
# then display the bar
if elapsed < 10.0:
fraction = 0.0
bar = " estimate improves over time"
else:
# Print out the progress bar
fraction = elapsed / estimated if estimated > 0.0 else 0.0
bar = "#" * int(pb._width * fraction)
percent = int(100 * fraction)
msg = "\r[{0:{1}.{1}}] | {2}% Complete (Estimate) | {3} / ~{4}".format(
bar, pb._width, percent,
format_time(elapsed),
"???" if estimated == 0.0 else format_time(estimated))
with suppress(ValueError):
pb._file.write(msg)
pb._file.flush()
return completed, estimated
def timer_func(pb):
    start = default_timer()
    # persist across iterations so update_bar can reuse the previous estimate
    # when no new tasks have completed (see the comment in update_bar)
    prev_completed = 0
    prev_estimated = 0.0
    while pb.running.is_set():
        elapsed = default_timer() - start
        if elapsed > pb._minimum:
            prev_completed, prev_estimated = update_bar(elapsed,
                                                        prev_completed,
                                                        prev_estimated,
                                                        pb)
        time.sleep(pb._dt)
default_out = DefaultOut("sys.stdout")
class EstimatingProgressBar(Callback):
"""
Progress Bar that displays elapsed time as well as an
estimate of total time taken.
When starting a dask computation,
the bar examines the graph and determines
the number of chunks contained by a dask collection.
During computation the number of completed chunks and
    the total time taken to complete them are
    tracked. The average derived from these numbers is
    used to estimate total compute time, relative to
    the current elapsed time.
    The bar is not particularly accurate and will
    underestimate near the beginning of computation
    and seems to slightly overestimate during the
    bulk of computation. However, it may be more accurate
than the default dask task bar which tracks
number of tasks completed by total tasks.
Parameters
----------
minimum : int, optional
Minimum time threshold in seconds before displaying a progress bar.
Default is 0 (always display)
width : int, optional
Width of the bar, default is 42 characters.
dt : float, optional
Update resolution in seconds, default is 1.0 seconds.
"""
@requires_optional("dask", opt_import_err)
def __init__(self, minimum=0, width=42, dt=1.0, out=default_out):
if out is None:
out = open(os.devnull, "w")
elif out is default_out:
out = sys.stdout
self._minimum = minimum
self._width = width
self._dt = dt
self._file = out
self._lock = Lock()
def _start(self, dsk):
self.task_start = {}
self.task_data = defaultdict(TaskData)
for k, v in dsk.items():
self.task_data[key_bin(k)].total += 1
self.running = Event()
self.running.set()
self.thread = Thread(target=timer_func, args=(self,))
        self.thread.daemon = True  # make the timer thread a daemon so it won't block interpreter exit
self.thread.start()
def _finish(self, dsk, state, errored):
self.running.clear()
self.task_data.clear()
self.task_start.clear()
def _pretask(self, key, dsk, state):
with self._lock:
self.task_start[key] = default_timer()
def _posttask(self, key, result, dsk, state, worker_id):
with self._lock:
td = self.task_data[key_bin(key)]
td.time_sum += default_timer() - self.task_start.pop(key)
td.completed += 1
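
# Minimal usage sketch (assumes dask and dask.array are installed). Like other
# dask callbacks, the bar is activated as a context manager around a compute call.
if __name__ == "__main__":
    import dask.array as da

    x = da.random.random((4000, 4000), chunks=(500, 500))
    with EstimatingProgressBar(dt=0.5):
        (x @ x.T).mean().compute()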
|
import yfinance as yf
import datetime as dt
import pandas as pd
from pandas_datareader import data as pdr
yf.pdr_override() # <== that's all it takes :-)
start =dt.datetime(1980,12,1)
now = dt.datetime.now()
stock=""
stock = input("Enter the stock symbol : ")
while stock != "quit":
df = pdr.get_data_yahoo(stock, start, now)
df.drop(df[df["Volume"]<1000].index, inplace=True)
dfmonth=df.groupby(pd.Grouper(freq="M"))["High"].max()
    glDate = 0
    lastGLV = 0
    currentDate = ""
    currentGLV = 0
    for index, value in dfmonth.items():
        if value > currentGLV:
            currentGLV = value
            currentDate = index
            counter = 0
        if value < currentGLV:
            counter = counter + 1
            if counter == 3 and ((index.month != now.month) or (index.year != now.year)):
                if currentGLV != lastGLV:
                    print(currentGLV)
                glDate = currentDate
                lastGLV = currentGLV
                counter = 0
if lastGLV==0:
message=stock+" has not formed a green line yet"
else:
message=("Last Green Line: "+str(lastGLV)+" on "+str(glDate))
print(message)
stock = input("Enter the stock symbol : ")
|
import pickle
import scipy.stats as st
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import auc
from sklearn.model_selection import StratifiedKFold
import xgboost as xgb
from sklearn.model_selection import KFold
from sklearn.metrics import matthews_corrcoef,make_scorer
def train_xgb(X,
y,
mod_number=1,
cv=None,
outfile="model.pickle",
n_iter_search=100,
nfolds=20,
random_state=42):
"""
Train an XGBoost model with hyper parameter optimization.
Parameters
----------
X : matrix
Matrix with all the features, every instance should be coupled to the y-value
y : vector
Vector with the class, every value should be coupled to an x-vector with features
    Returns
    -------
    float
        Best cross-validated ROC AUC score from the RandomizedSearchCV; the
        fitted search object itself is pickled to `outfile`.
    """
xgb_handle = xgb.XGBClassifier()
one_to_left = st.beta(10, 1)
from_zero_positive = st.expon(0, 50)
#Define distributions to sample from for hyper parameter optimization
param_dist = {
"n_estimators": st.randint(25, 150),
"max_depth": st.randint(5, 10),
"learning_rate": st.uniform(0.05, 0.4),
#"colsample_bytree": one_to_left,
"subsample": one_to_left,
"gamma": st.uniform(0, 10),
"reg_alpha": from_zero_positive,
"min_child_weight": from_zero_positive,
}
if not cv: cv = KFold(n_splits=nfolds, shuffle=True,random_state=random_state)
mcc = make_scorer(matthews_corrcoef)
random_search = RandomizedSearchCV(xgb_handle, param_distributions=param_dist,
n_iter=n_iter_search,verbose=10,scoring="roc_auc",
n_jobs=1,refit=True,cv=cv)
random_search.fit(X, y)
random_search.feats = X.columns
pickle.dump(random_search,open(outfile,"wb"))
return(random_search.best_score_) |
'''
Beginning with just a plot
Let's get started on the Gapminder app. Your job is to make the ColumnDataSource object, prepare the plot, and add circles for Life expectancy vs Fertility. You'll also set x and y ranges for the axes.
As in the previous chapter, the DataCamp environment executes the bokeh serve command to run the app for you. When you hit 'Submit Answer', you'll see in the IPython Shell that bokeh serve script.py gets called to run the app. This is something to keep in mind when you are creating your own interactive visualizations outside of the DataCamp environment.
INSTRUCTIONS
100XP
Make a ColumnDataSource object called source with 'x', 'y', 'country', 'pop' and 'region' keys. The Pandas selections are provided for you.
Save the minimum and maximum values of the life expectancy column data.life as ymin and ymax. As a guide, you can refer to the way we saved the minimum and maximum values of the fertility column data.fertility as xmin and xmax.
Create a plot called plot() by specifying the title, setting plot_height to 400, plot_width to 700, and adding the x_range and y_range parameters.
Add circle glyphs to the plot. Specify a fill_alpha of 0.8 and source=source.
'''
# Import the necessary modules
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
# Make the ColumnDataSource: source
source = ColumnDataSource(data={
'x' : data.loc[1970].fertility,
'y' : data.loc[1970].life,
'country' : data.loc[1970].Country,
'pop' : (data.loc[1970].population / 20000000) + 2,
'region' : data.loc[1970].region,
})
# Save the minimum and maximum values of the fertility column: xmin, xmax
xmin, xmax = min(data.fertility), max(data.fertility)
# Save the minimum and maximum values of the life expectancy column: ymin, ymax
ymin, ymax = min(data.life), max(data.life)
# Create the figure: plot
plot = figure(title='Gapminder Data for 1970', plot_height=400, plot_width=700, x_range=(xmin, xmax), y_range=(ymin, ymax))
# Add circle glyphs to the plot
plot.circle(x='x', y='y', fill_alpha=0.8, source=source)
# Set the x-axis label
plot.xaxis.axis_label ='Fertility (children per woman)'
# Set the y-axis label
plot.yaxis.axis_label = 'Life Expectancy (years)'
# Add the plot to the current document and add a title
curdoc().add_root(plot)
curdoc().title = 'Gapminder'
|
# Copyright (c) 2021, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Readers for ASCAT Level 1b and Level 2 data in EPS Native format.
"""
import os
import fnmatch
from gzip import GzipFile
from collections import OrderedDict, defaultdict
from tempfile import NamedTemporaryFile
import numpy as np
import xarray as xr
import lxml.etree as etree
from cadati.jd_date import jd2dt
from ascat.utils import get_toi_subset, get_roi_subset
short_cds_time = np.dtype([('day', '>u2'), ('time', '>u4')])
long_cds_time = np.dtype([('day', '>u2'), ('ms', '>u4'), ('mms', '>u2')])
long_nan = np.iinfo(np.int32).min
ulong_nan = np.iinfo(np.uint32).max
int_nan = np.iinfo(np.int16).min
uint_nan = np.iinfo(np.uint16).max
byte_nan = np.iinfo(np.byte).min
int8_nan = np.iinfo(np.int8).max
uint8_nan = np.iinfo(np.uint8).max
float32_nan = -999999.
# 2000-01-01 00:00:00
julian_epoch = 2451544.5
class AscatL1bEpsSzfFile:
"""
ASCAT Level 1b EPS Native reader class.
"""
def __init__(self, filename):
"""
Initialize AscatL1bEpsFile.
Parameters
----------
filename : str
Filename.
"""
self.filename = filename
def read(self, toi=None, roi=None, generic=True, to_xarray=False):
"""
Read ASCAT Level 1b data.
Returns
-------
ds : xarray.Dataset
ASCAT Level 1b data.
"""
ds = read_eps_l1b(self.filename, generic, to_xarray,
full=False, unsafe=True, scale_mdr=False)
if toi:
ds = get_toi_subset(ds, toi)
if roi:
ds = get_roi_subset(ds, roi)
return ds
def read_period(self, dt_start, dt_end, **kwargs):
"""
Read interval.
"""
return self.read(toi=(dt_start, dt_end), **kwargs)
def close(self):
"""
Close file.
"""
pass
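
# Illustrative usage of the SZF reader above (hypothetical file name):
#
#   szf = AscatL1bEpsSzfFile("ASCA_SZF_1B_M01_20210101000000Z.nat")
#   ds = szf.read()                          # full orbit file
#   ds = szf.read_period(dt_start, dt_end)   # time-of-interest subset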
class AscatL1bEpsFile:
"""
ASCAT Level 1b EPS Native reader class.
"""
def __init__(self, filename):
"""
Initialize AscatL1bEpsFile.
Parameters
----------
filename : str
Filename.
"""
self.filename = filename
def read(self, generic=False, to_xarray=False):
"""
Read ASCAT Level 1b data.
Returns
-------
ds : xarray.Dataset
ASCAT Level 1b data.
"""
return read_eps_l1b(self.filename, generic, to_xarray)
def close(self):
"""
Close file.
"""
pass
class AscatL2EpsFile:
"""
ASCAT Level 2 EPS Native reader class.
"""
def __init__(self, filename):
"""
Initialize AscatL2EpsFile.
Parameters
----------
filename : str
Filename.
"""
self.filename = filename
def read(self, generic=False, to_xarray=False):
"""
Read ASCAT Level 2 data.
Returns
-------
ds : dict, xarray.Dataset
            ASCAT Level 2 data.
"""
return read_eps_l2(self.filename, generic, to_xarray)
def close(self):
"""
Close file.
"""
pass
class EPSProduct:
"""
Class for reading EPS products.
"""
def __init__(self, filename):
"""
Initialize EPSProduct.
Parameters
----------
filename : str
EPS Native Filename.
"""
self.filename = filename
self.fid = None
self.mphr = None
self.sphr = None
self.aux = defaultdict(list)
self.mdr = None
self.scaled_mdr = None
self.xml_file = None
self.xml_doc = None
self.mdr_template = None
self.scaled_template = None
self.sfactor = None
self.grh_dtype = np.dtype([('record_class', 'u1'),
('instrument_group', 'u1'),
('record_subclass', 'u1'),
('record_subclass_version', 'u1'),
('record_size', '>u4'),
('record_start_time', short_cds_time),
('record_stop_time', short_cds_time)])
self.ipr_dtype = np.dtype([('grh', self.grh_dtype),
('target_record_class', 'u1'),
('target_instrument_group', 'u1'),
('target_record_subclass', 'u1'),
('target_record_offset', '>u4')])
self.pointer_dtype = np.dtype([('grh', self.grh_dtype),
('aux_data_pointer', 'u1', 100)])
self.filesize = os.path.getsize(self.filename)
def read_mphr(self):
"""
Read only Main Product Header Record (MPHR).
"""
with open(self.filename, 'rb') as fid:
grh = np.fromfile(fid, dtype=self.grh_dtype, count=1)[0]
if grh['record_class'] == 1:
mphr = fid.read(grh['record_size'] - grh.itemsize)
mphr = OrderedDict(item.replace(' ', '').split('=')
for item in
mphr.decode("utf-8").split('\n')[:-1])
return mphr
def read(self, full=True, unsafe=False, scale_mdr=True):
"""
Read EPS file.
Parameters
----------
full : bool, optional
Read full file content (True) or just Main Product Header
Record (MPHR) and Main Data Record (MDR) (False). Default: True
unsafe : bool, optional
If True it is (unsafely) assumed that MDR are continuously
stacked until the end of file. Makes reading a lot faster.
Default: False
scale_mdr : bool, optional
Compute scaled MDR (True) or not (False). Default: True
Returns
-------
        mphr : dict
Main Product Header Record (MPHR).
sphr : dict
            Secondary Product Header Record (SPHR).
aux : dict
Auxiliary Header Products.
mdr : numpy.ndarray
Main Data Record (MDR)
scaled_mdr : numpy.ndarray
            Scaled Main Data Record (MDR) or None if not computed.
"""
self.fid = open(self.filename, 'rb')
abs_pos = 0
grh = None
prev_grh = None
record_count = 0
while True:
# read generic record header of data block
grh = np.fromfile(self.fid, dtype=self.grh_dtype, count=1)[0]
if grh['record_class'] == 8 and unsafe:
if np.mod((self.filesize - abs_pos),
self.mdr_template.itemsize) != 0:
# Unsafe reading fails, switching to safe reading
unsafe = False
else:
num_mdr = (self.filesize -
abs_pos) // self.mdr_template.itemsize
self.fid.seek(abs_pos)
self.read_record_class(grh, num_mdr)
break
if prev_grh is None:
prev_grh = grh
if ((prev_grh['record_class'] != grh['record_class']) or
(prev_grh['record_subclass'] != grh['record_subclass'])):
# compute record start position of previous record
start_pos = (abs_pos - prev_grh['record_size'] * record_count)
self.fid.seek(start_pos)
if full or (prev_grh['record_class'] == 8 or
prev_grh['record_class'] == 1):
# read previous record, because new one is coming
self.read_record_class(prev_grh, record_count)
# reset record class count
record_count = 1
else:
# same record class as before, increase count
record_count += 1
abs_pos += grh['record_size']
# position after record
self.fid.seek(abs_pos)
# store grh
prev_grh = grh
# end of file?
if abs_pos == self.filesize:
# compute record start position of previous record class
start_pos = (abs_pos - prev_grh['record_size'] * record_count)
self.fid.seek(start_pos)
# read final record class(es)
self.read_record_class(prev_grh, record_count)
break
self.fid.close()
if scale_mdr:
self.scaled_mdr = self._scaling(self.mdr, self.scaled_template,
self.mdr_sfactor)
return self.mphr, self.sphr, self.aux, self.mdr, self.scaled_mdr
def read_record_class(self, grh, record_count):
"""
Read record class.
Parameters
----------
grh : numpy.ndarray
Generic record header.
record_count : int
Number of records.
"""
        # mphr (Main Product Header Record)
if grh['record_class'] == 1:
self.fid.seek(grh.itemsize, 1)
self._read_mphr(grh)
# find the xml file corresponding to the format version
# and load template
self.xml_file = self._get_eps_xml()
self.xml_doc = etree.parse(self.xml_file)
self.mdr_template, self.scaled_template, self.mdr_sfactor = \
self._read_xml_mdr()
# sphr (Secondary Product Header Record)
elif grh['record_class'] == 2:
self.fid.seek(grh.itemsize, 1)
self._read_sphr(grh)
# ipr (Internal Pointer Record)
elif grh['record_class'] == 3:
data = np.fromfile(self.fid, dtype=self.ipr_dtype,
count=record_count)
self.aux['ipr'].append(data)
# geadr (Global External Auxiliary Data Record)
elif grh['record_class'] == 4:
data = self._read_pointer(record_count)
self.aux['geadr'].append(data)
# veadr (Variable External Auxiliary Data Record)
elif grh['record_class'] == 6:
data = self._read_pointer(record_count)
self.aux['veadr'].append(data)
# viadr (Variable Internal Auxiliary Data Record)
elif grh['record_class'] == 7:
template, scaled_template, sfactor = self._read_xml_viadr(
grh['record_subclass'])
viadr_element = np.fromfile(self.fid, dtype=template,
count=record_count)
viadr_element_sc = self._scaling(viadr_element,
scaled_template, sfactor)
# store viadr_grid separately
if grh['record_subclass'] == 8:
self.aux['viadr_grid'].append(viadr_element)
self.aux['viadr_grid_scaled'].append(viadr_element_sc)
else:
self.aux['viadr'].append(viadr_element)
self.aux['viadr_scaled'].append(viadr_element_sc)
# mdr (Measurement Data Record)
elif grh['record_class'] == 8:
if grh['instrument_group'] == 13:
self.dummy_mdr = np.fromfile(
self.fid, dtype=self.mdr_template, count=record_count)
else:
self.mdr = np.fromfile(
self.fid, dtype=self.mdr_template, count=record_count)
self.mdr_counter = record_count
else:
raise RuntimeError("Record class not found.")
def _scaling(self, unscaled_mdr, scaled_template, sfactor):
"""
Scale the MDR.
Parameters
----------
unscaled_mdr : numpy.ndarray
Raw MDR.
scaled_template : numpy.dtype
Scaled MDR template.
sfactor : dict
Scale factors.
Returns
-------
scaled_mdr : numpy.ndarray
Scaled MDR.
"""
scaled_mdr = np.empty(unscaled_mdr.shape, dtype=scaled_template)
for key, value in sfactor.items():
if value != 1:
scaled_mdr[key] = unscaled_mdr[key] * 1./value
else:
scaled_mdr[key] = unscaled_mdr[key]
return scaled_mdr
def _read_mphr(self, grh):
"""
        Read Main Product Header Record (MPHR).
"""
mphr = self.fid.read(grh['record_size'] - grh.itemsize)
self.mphr = OrderedDict(item.replace(' ', '').split('=')
for item in
mphr.decode("utf-8").split('\n')[:-1])
def _read_sphr(self, grh):
"""
        Read Secondary Product Header Record (SPHR).
"""
sphr = self.fid.read(grh['record_size'] - grh.itemsize)
self.sphr = OrderedDict(item.replace(' ', '').split('=')
for item in
sphr.decode("utf-8").split('\n')[:-1])
def _read_pointer(self, count=1):
"""
Read pointer record.
"""
record = np.fromfile(self.fid, dtype=self.pointer_dtype, count=count)
return record
def _get_eps_xml(self):
"""
Find the corresponding eps xml file.
"""
format_path = os.path.join(os.path.dirname(__file__), 'formats')
# loop through files where filename starts with 'eps_ascat'.
for filename in fnmatch.filter(os.listdir(format_path), 'eps_ascat*'):
doc = etree.parse(os.path.join(format_path, filename))
file_extension = doc.xpath('//file-extensions')[0].getchildren()[0]
format_version = doc.xpath('//format-version')
for elem in format_version:
major = elem.getchildren()[0]
minor = elem.getchildren()[1]
# return the xml file matching the metadata of the datafile.
if major.text == self.mphr['FORMAT_MAJOR_VERSION'] and \
minor.text == self.mphr['FORMAT_MINOR_VERSION'] and \
self.mphr[
'PROCESSING_LEVEL'] in file_extension.text and \
self.mphr['PRODUCT_TYPE'] in file_extension.text:
return os.path.join(format_path, filename)
def _read_xml_viadr(self, subclassid):
"""
Read xml record of viadr class.
"""
elements = self.xml_doc.xpath('//viadr')
data = OrderedDict()
length = []
# find the element with the correct subclass
for elem in elements:
item_dict = dict(elem.items())
subclass = int(item_dict['subclass'])
if subclass == subclassid:
break
for child in elem.getchildren():
if child.tag == 'delimiter':
continue
child_items = dict(child.items())
name = child_items.pop('name')
# check if the item is of type longtime
longtime_flag = ('type' in child_items and
'longtime' in child_items['type'])
# append the length if it isn't the special case of type longtime
try:
var_len = child_items.pop('length')
if not longtime_flag:
length.append(np.int(var_len))
except KeyError:
pass
data[name] = child_items
if child.tag == 'array':
for arr in child.iterdescendants():
arr_items = dict(arr.items())
if arr.tag == 'field':
data[name].update(arr_items)
else:
try:
var_len = arr_items.pop('length')
length.append(np.int(var_len))
except KeyError:
pass
if length:
data[name].update({'length': length})
else:
data[name].update({'length': 1})
length = []
conv = {'longtime': long_cds_time, 'time': short_cds_time,
'boolean': 'u1', 'integer1': 'i1',
'uinteger1': 'u1', 'integer': '>i4',
'uinteger': '>u4', 'integer2': '>i2',
'uinteger2': '>u2', 'integer4': '>i4',
'uinteger4': '>u4', 'integer8': '>i8',
'enumerated': 'u1', 'string': 'str', 'bitfield': 'u1'}
scaling_factor = {}
scaled_dtype = []
dtype = []
for key, value in data.items():
if 'scaling-factor' in value:
sf_dtype = np.float32
sf_split = value['scaling-factor'].split('^')
scaling_factor[key] = np.int(sf_split[0])**np.int(sf_split[1])
else:
sf_dtype = conv[value['type']]
scaling_factor[key] = 1
length = value['length']
if length == 1:
scaled_dtype.append((key, sf_dtype))
dtype.append((key, conv[value['type']]))
else:
scaled_dtype.append((key, sf_dtype, length))
dtype.append((key, conv[value['type']], length))
return np.dtype(dtype), np.dtype(scaled_dtype), scaling_factor
def _read_xml_mdr(self):
"""
Read xml record of mdr class.
"""
elements = self.xml_doc.xpath('//mdr')
data = OrderedDict()
length = []
elem = elements[0]
for child in elem.getchildren():
if child.tag == 'delimiter':
continue
child_items = dict(child.items())
name = child_items.pop('name')
# check if the item is of type bitfield
bitfield_flag = ('type' in child_items and
('bitfield' in child_items['type'] or 'time' in
child_items['type']))
# append the length if it isn't the special case of type
# bitfield or time
try:
var_len = child_items.pop('length')
if not bitfield_flag:
length.append(np.int(var_len))
except KeyError:
pass
data[name] = child_items
if child.tag == 'array':
for arr in child.iterdescendants():
arr_items = dict(arr.items())
# check if the type is bitfield
bitfield_flag = ('type' in arr_items and
'bitfield' in arr_items['type'])
if bitfield_flag:
data[name].update(arr_items)
break
else:
if arr.tag == 'field':
data[name].update(arr_items)
else:
try:
var_len = arr_items.pop('length')
length.append(np.int(var_len))
except KeyError:
pass
if length:
data[name].update({'length': length})
else:
data[name].update({'length': 1})
length = []
conv = {'longtime': long_cds_time, 'time': short_cds_time,
'boolean': 'u1', 'integer1': 'i1',
'uinteger1': 'u1', 'integer': '>i4',
'uinteger': '>u4', 'integer2': '>i2',
'uinteger2': '>u2', 'integer4': '>i4',
'uinteger4': '>u4', 'integer8': '>i8',
'enumerated': 'u1', 'string': 'str', 'bitfield': 'u1'}
scaling_factor = {}
scaled_dtype = []
dtype = [('grh', self.grh_dtype)]
for key, value in data.items():
if 'scaling-factor' in value:
sf_dtype = np.float32
sf_split = value['scaling-factor'].split('^')
scaling_factor[key] = np.int(sf_split[0])**np.int(sf_split[1])
else:
sf_dtype = conv[value['type']]
scaling_factor[key] = 1
length = value['length']
if length == 1:
scaled_dtype.append((key, sf_dtype))
dtype.append((key, conv[value['type']]))
else:
scaled_dtype.append((key, sf_dtype, length))
dtype.append((key, conv[value['type']], length))
return np.dtype(dtype), np.dtype(scaled_dtype), scaling_factor
def conv_epsl1bszf_generic(data, metadata):
"""
Rename and convert data types of dataset.
Parameters
----------
data : dict of numpy.ndarray
Original dataset.
metadata : dict
Metadata.
Returns
-------
data : dict of numpy.ndarray
Converted dataset.
"""
skip_fields = ['utc_localisation-days', 'utc_localisation-milliseconds',
'degraded_inst_mdr', 'degraded_proc_mdr', 'flagfield_rf1',
'flagfield_rf2', 'flagfield_pl', 'flagfield_gen1',
'flagfield_gen2']
gen_fields_lut = {
'inc_angle_full': ('inc', np.float32, (0, 90), float32_nan),
'azi_angle_full': ('azi', np.float32, (0, 360), float32_nan),
'sigma0_full': ('sig', np.float32, (-35, 35), float32_nan),
'sat_track_azi': ('sat_track_azi', np.float32, (0, 360), float32_nan),
'beam_number': ('beam_number', np.int8, (1, 6), int8_nan),
'swath_indicator': ('swath_indicator', np.int8, (0, 1), int8_nan),
'land_frac': ('land_frac', np.float32, (0, 1), float32_nan),
'f_usable': ('f_usable', np.int8, (0, 2), int8_nan),
# 'f_land': ('f_land', np.int8, (0, 1), int8_nan),
'as_des_pass': ('as_des_pass', np.uint8, (0, 1), uint8_nan),
'time': ('time', None, (np.datetime64('1900-01-01'),
np.datetime64('2100-01-01')), 0),
'lon': ('lon', np.float32, (-180, 180), float32_nan),
'lat': ('lat', np.float32, (-90, 90), float32_nan)}
for var_name in skip_fields:
data.pop(var_name, None)
for var_name, (new_name, new_dtype, valid_range, nan_val) in gen_fields_lut.items():
if new_dtype is None:
data[new_name] = np.ma.array(data.pop(var_name))
data[new_name].mask = ((data[new_name] < valid_range[0]) |
(data[new_name] > valid_range[1]))
data[new_name].set_fill_value(nan_val)
else:
data[new_name] = np.ma.array(data.pop(var_name).astype(new_dtype))
data[new_name].mask = ((data[new_name] < valid_range[0]) |
(data[new_name] > valid_range[1]))
data[new_name].set_fill_value(nan_val)
return data
def conv_epsl1bszx_generic(data, metadata):
"""
Rename and convert data types of dataset.
Parameters
----------
data : dict of numpy.ndarray
Original dataset.
metadata : dict
Metadata.
Returns
-------
data : dict of numpy.ndarray
Converted dataset.
"""
gen_fields_lut = {'inc_angle_trip': ('inc', np.float32, uint_nan),
'azi_angle_trip': ('azi', np.float32, int_nan),
'sigma0_trip': ('sig', np.float32, long_nan),
'kp': ('kp', np.float32, uint_nan),
'f_kp': ('kp_quality', np.uint8, uint8_nan)}
skip_fields = ['flagfield_rf1', 'f_f', 'f_v', 'f_oa', 'f_sa', 'f_tel']
for var_name in skip_fields:
if var_name in data:
data.pop(var_name)
for var_name, (new_name, new_dtype, nan_val) in gen_fields_lut.items():
data[new_name] = data.pop(var_name).astype(new_dtype)
if nan_val is not None:
data[new_name][data[new_name] == nan_val] = float32_nan
data['sat_id'] = np.repeat(metadata['sat_id'], data['time'].size)
return data
def conv_epsl2szx_generic(data, metadata):
"""
Rename and convert data types of dataset.
Parameters
----------
data : dict of numpy.ndarray
Original dataset.
metadata : dict
Metadata.
Returns
-------
data : dict of numpy.ndarray
Converted dataset.
"""
gen_fields_lut = {
'inc_angle_trip': ('inc', np.float32, uint_nan),
'azi_angle_trip': ('azi', np.float32, int_nan),
'sigma0_trip': ('sig', np.float32, long_nan),
'soil_moisture': ('sm', np.float32, uint_nan),
'soil_moisture_error': ('sm_noise', np.float32, uint_nan),
'mean_surf_soil_moisture': ('sm_mean', np.float32, uint_nan),
'soil_moisture_sensetivity': ('sm_sens', np.float32, ulong_nan),
'sigma40': ('sig40', np.float32, long_nan),
'sigma40_error': ('sig40_noise', np.float32, long_nan),
'slope40': ('slope40', np.float32, long_nan),
'slope40_error': ('slope40_noise', np.float32, long_nan),
'dry_backscatter': ('dry_sig40', np.float32, long_nan),
'wet_backscatter': ('wet_sig40', np.float32, long_nan),
'as_des_pass': ('as_des_pass', np.uint8, None),
'aggregated_quality_flag': ('agg_flag', np.uint8, None),
'processing_flags': ('proc_flag', np.uint8, None),
'correction_flags': ('corr_flag', np.uint8, None),
'snow_cover_probability': ('snow_prob', np.uint8, None),
'frozen_soil_probability': ('frozen_prob', np.uint8, None),
'innudation_or_wetland': ('wetland', np.uint8, None),
'topographical_complexity': ('topo', np.uint8, None),
'kp': ('kp', np.float32, uint_nan)}
skip_fields = ['flagfield_rf1', 'f_f', 'f_v', 'f_oa', 'f_sa', 'f_tel']
for var_name in skip_fields:
if var_name in data:
data.pop(var_name)
for var_name, (new_name, new_dtype, nan_val) in gen_fields_lut.items():
data[new_name] = data.pop(var_name).astype(new_dtype)
if nan_val is not None:
data[new_name][data[new_name] == nan_val] = float32_nan
data['sat_id'] = np.repeat(metadata['sat_id'], data['time'].size)
return data
def read_eps_l1b(filename, generic=False, to_xarray=False, full=True,
unsafe=False, scale_mdr=True):
"""
Level 1b reader and data preparation.
Parameters
----------
filename : str
ASCAT Level 1b file name in EPS Native format.
generic : bool, optional
'True' reading and converting into generic format or
'False' reading original field names (default: False).
to_xarray : bool, optional
'True' return data as xarray.Dataset
        'False' return data as numpy.ndarray (default: False).
    full : bool, optional
        Read full file content (True) or just MPHR and MDR (False).
        Default: True
    unsafe : bool, optional
        If True it is (unsafely) assumed that MDR are continuously
        stacked until the end of file. Default: False
    scale_mdr : bool, optional
        Compute scaled MDR (True) or not (False). Default: True
Returns
-------
ds : xarray.Dataset, dict of xarray.Dataset
ASCAT Level 1b data.
"""
eps_file = read_eps(filename, full=full, unsafe=unsafe,
scale_mdr=scale_mdr)
ptype = eps_file.mphr['PRODUCT_TYPE']
fmv = int(eps_file.mphr['FORMAT_MAJOR_VERSION'])
if ptype == 'SZF':
if fmv == 12:
data, metadata = read_szf_fmv_12(eps_file)
else:
raise RuntimeError("L1b SZF format version not supported.")
rename_coords = {'longitude_full': 'lon', 'latitude_full': 'lat'}
for k, v in rename_coords.items():
data[v] = data.pop(k)
if generic:
data = conv_epsl1bszf_generic(data, metadata)
# 1 Left Fore Antenna, 2 Left Mid Antenna 3 Left Aft Antenna
# 4 Right Fore Antenna, 5 Right Mid Antenna, 6 Right Aft Antenna
antennas = ['lf', 'lm', 'la', 'rf', 'rm', 'ra']
ds = OrderedDict()
for i, antenna in enumerate(antennas):
subset = data['beam_number'] == i+1
metadata['beam_number'] = i+1
metadata['beam_name'] = antenna
# convert spacecraft_id to internal sat_id
sat_id = np.array([4, 3, 5])
metadata['sat_id'] = sat_id[metadata['spacecraft_id']-1]
# convert dict to xarray.Dataset or numpy.ndarray
if to_xarray:
sub_data = {}
for var_name in data.keys():
if var_name == 'beam_number' and generic:
continue
if len(data[var_name].shape) == 1:
dim = ['obs']
elif len(data[var_name].shape) == 2:
dim = ['obs', 'echo']
sub_data[var_name] = (dim, data[var_name][subset])
coords = {}
coords_fields = ['lon', 'lat', 'time']
for cf in coords_fields:
coords[cf] = sub_data.pop(cf)
ds[antenna] = xr.Dataset(sub_data, coords=coords,
attrs=metadata)
else:
# collect dtype info
dtype = []
fill_values = {}
for var_name in data.keys():
if var_name == 'beam_number' and generic:
continue
if len(data[var_name][subset].shape) == 1:
dtype.append(
(var_name, data[var_name][subset].dtype.str))
elif len(data[var_name][subset].shape) > 1:
dtype.append((var_name, data[var_name][
subset].dtype.str, data[var_name][
subset].shape[1:]))
fill_values[var_name] = data[var_name].fill_value
ds[antenna] = np.ma.empty(
data['time'][subset].size, dtype=np.dtype(dtype))
for var_name, v in data.items():
if var_name == 'beam_number' and generic:
continue
ds[antenna][var_name] = v[subset]
ds[antenna][var_name].set_fill_value(fill_values[var_name])
elif ptype in ['SZR', 'SZO']:
if fmv == 11:
data, metadata = read_szx_fmv_11(eps_file)
elif fmv == 12:
data, metadata = read_szx_fmv_12(eps_file)
else:
raise RuntimeError("SZX format version not supported.")
data['time'] = jd2dt(data.pop('jd'))
rename_coords = {'longitude': 'lon', 'latitude': 'lat'}
for k, v in rename_coords.items():
data[v] = data.pop(k)
# convert spacecraft_id to internal sat_id
sat_id = np.array([4, 3, 5])
metadata['sat_id'] = sat_id[metadata['spacecraft_id']-1]
# add/rename/remove fields according to generic format
if generic:
data = conv_epsl1bszx_generic(data, metadata)
# convert dict to xarray.Dataset or numpy.ndarray
if to_xarray:
for k in data.keys():
if len(data[k].shape) == 1:
dim = ['obs']
elif len(data[k].shape) == 2:
dim = ['obs', 'beam']
data[k] = (dim, data[k])
coords = {}
coords_fields = ['lon', 'lat', 'time']
for cf in coords_fields:
coords[cf] = data.pop(cf)
ds = xr.Dataset(data, coords=coords, attrs=metadata)
else:
# collect dtype info
dtype = []
for var_name in data.keys():
if len(data[var_name].shape) == 1:
dtype.append((var_name, data[var_name].dtype.str))
elif len(data[var_name].shape) > 1:
dtype.append((var_name, data[var_name].dtype.str,
data[var_name].shape[1:]))
ds = np.empty(data['time'].size, dtype=np.dtype(dtype))
for k, v in data.items():
ds[k] = v
else:
raise RuntimeError("Format not supported. Product type {:1}"
" Format major version: {:2}".format(ptype, fmv))
return ds
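# A minimal usage sketch (not part of the source; the filename below is
# hypothetical) showing how read_eps_l1b might be called with the options
# documented above.
#
#     l1b = read_eps_l1b('ASCA_SZF_1B_example.nat.gz', generic=True,
#                        to_xarray=True)
#     # For SZF products this returns a dict of xarray.Dataset keyed by
#     # antenna name (e.g. l1b['lf'] for the left-fore beam); SZR/SZO
#     # products return a single dataset (or numpy.ndarray when
#     # to_xarray=False).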
def read_eps_l2(filename, generic=False, to_xarray=False):
"""
Level 2 reader and data preparation.
Parameters
----------
filename : str
        ASCAT Level 2 file name in EPS Native format.
generic : bool, optional
'True' reading and converting into generic format or
'False' reading original field names (default: False).
to_xarray : bool, optional
'True' return data as xarray.Dataset
'False' return data as numpy.ndarray (default: False).
Returns
-------
ds : xarray.Dataset, dict of xarray.Dataset
        ASCAT Level 2 data.
"""
eps_file = read_eps(filename)
ptype = eps_file.mphr['PRODUCT_TYPE']
fmv = int(eps_file.mphr['FORMAT_MAJOR_VERSION'])
if ptype in ['SMR', 'SMO']:
if fmv == 12:
data, metadata = read_smx_fmv_12(eps_file)
else:
raise RuntimeError("L2 SM format version not supported.")
data['time'] = jd2dt(data.pop('jd'))
rename_coords = {'longitude': 'lon', 'latitude': 'lat'}
for k, v in rename_coords.items():
data[v] = data.pop(k)
# convert spacecraft_id to internal sat_id
sat_id = np.array([4, 3, 5])
metadata['sat_id'] = sat_id[metadata['spacecraft_id']-1]
# add/rename/remove fields according to generic format
if generic:
data = conv_epsl2szx_generic(data, metadata)
# convert dict to xarray.Dataset or numpy.ndarray
if to_xarray:
for k in data.keys():
if len(data[k].shape) == 1:
dim = ['obs']
elif len(data[k].shape) == 2:
dim = ['obs', 'beam']
data[k] = (dim, data[k])
coords = {}
coords_fields = ['lon', 'lat', 'time']
for cf in coords_fields:
coords[cf] = data.pop(cf)
ds = xr.Dataset(data, coords=coords, attrs=metadata)
else:
# collect dtype info
dtype = []
for var_name in data.keys():
if len(data[var_name].shape) == 1:
dtype.append((var_name, data[var_name].dtype.str))
elif len(data[var_name].shape) > 1:
dtype.append((var_name, data[var_name].dtype.str,
data[var_name].shape[1:]))
ds = np.empty(data['time'].size, dtype=np.dtype(dtype))
for k, v in data.items():
ds[k] = v
else:
raise ValueError("Format not supported. Product type {:1}"
" Format major version: {:2}".format(ptype, fmv))
return ds
def read_eps(filename, mphr_only=False, full=True, unsafe=False,
scale_mdr=True):
"""
Read EPS file.
Parameters
----------
    filename : str
        Filename.
    mphr_only : bool, optional
        Read only the Main Product Header Record (MPHR). Default: False
    full : bool, optional
        Read full file content (True) or just MPHR and MDR (False).
        Default: True
    unsafe : bool, optional
        If True it is (unsafely) assumed that MDR are continuously
        stacked until the end of file. Default: False
    scale_mdr : bool, optional
        Compute scaled MDR (True) or not (False). Default: True
Returns
-------
prod : EPSProduct
EPS data.
"""
zipped = False
if os.path.splitext(filename)[1] == '.gz':
zipped = True
# for zipped files use an unzipped temporary copy
if zipped:
with NamedTemporaryFile(delete=False) as tmp_fid:
with GzipFile(filename) as gz_fid:
tmp_fid.write(gz_fid.read())
filename = tmp_fid.name
# create the eps object with the filename and read it
prod = EPSProduct(filename)
if mphr_only:
mphr = prod.read_mphr()
prod.mphr = mphr
else:
prod.read(full, unsafe, scale_mdr)
# remove the temporary copy
if zipped:
os.remove(filename)
return prod
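# Usage sketch (an assumption for illustration, not taken from the source):
# reading only the MPHR is a cheap way to inspect product type and format
# version before parsing the rest. The filename is hypothetical.
#
#     prod = read_eps('ASCA_SZR_1B_example.nat', mphr_only=True)
#     print(prod.mphr['PRODUCT_TYPE'], prod.mphr['FORMAT_MAJOR_VERSION'])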
def read_szx_fmv_11(eps_file):
"""
Read SZO/SZR format version 11.
Parameters
----------
eps_file : EPSProduct object
EPS Product object.
Returns
-------
data : numpy.ndarray
SZO/SZR data.
"""
raw_data = eps_file.scaled_mdr
raw_unscaled = eps_file.mdr
mphr = eps_file.mphr
n_node_per_line = raw_data['LONGITUDE'].shape[1]
n_lines = raw_data['LONGITUDE'].shape[0]
n_records = raw_data['LONGITUDE'].size
data = {}
metadata = {}
idx_nodes = np.arange(n_lines).repeat(n_node_per_line)
ascat_time = shortcdstime2jd(raw_data['UTC_LINE_NODES'].flatten()['day'],
raw_data['UTC_LINE_NODES'].flatten()['time'])
data['jd'] = ascat_time[idx_nodes]
metadata['spacecraft_id'] = np.int8(mphr['SPACECRAFT_ID'][-1])
metadata['orbit_start'] = np.uint32(mphr['ORBIT_START'])
fields = ['processor_major_version', 'processor_minor_version',
'format_major_version', 'format_minor_version']
for f in fields:
metadata[f] = np.int16(mphr[f.upper()])
fields = ['sat_track_azi']
for f in fields:
data[f] = raw_data[f.upper()].flatten()[idx_nodes]
fields = [('longitude', long_nan), ('latitude', long_nan),
('swath_indicator', byte_nan)]
for f, nan_val in fields:
data[f] = raw_data[f.upper()].flatten()
valid = raw_unscaled[f.upper()].flatten() != nan_val
data[f][~valid] = nan_val
fields = [('sigma0_trip', long_nan),
('inc_angle_trip', uint_nan),
('azi_angle_trip', int_nan),
('kp', uint_nan),
('f_kp', byte_nan),
('f_usable', byte_nan),
('f_f', uint_nan),
('f_v', uint_nan),
('f_oa', uint_nan),
('f_sa', uint_nan),
('f_tel', uint_nan),
('f_land', uint_nan)]
for f, nan_val in fields:
data[f] = raw_data[f.upper()].reshape(n_records, 3)
valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val
data[f][~valid] = nan_val
# modify longitudes from (0, 360) to (-180,180)
mask = np.logical_and(data['longitude'] != long_nan,
data['longitude'] > 180)
data['longitude'][mask] += -360.
# modify azimuth from (-180, 180) to (0, 360)
mask = (data['azi_angle_trip'] != int_nan) & (data['azi_angle_trip'] < 0)
data['azi_angle_trip'][mask] += 360
data['node_num'] = np.tile((np.arange(n_node_per_line) + 1),
n_lines).astype(np.uint8)
data['line_num'] = idx_nodes.astype(np.uint16)
data['as_des_pass'] = (data['sat_track_azi'] < 270).astype(np.uint8)
return data, metadata
def read_szx_fmv_12(eps_file):
"""
Read SZO/SZR format version 12.
Parameters
----------
eps_file : EPSProduct object
EPS Product object.
Returns
-------
data : numpy.ndarray
SZO/SZR data.
"""
raw_data = eps_file.scaled_mdr
raw_unscaled = eps_file.mdr
mphr = eps_file.mphr
n_node_per_line = raw_data['LONGITUDE'].shape[1]
n_lines = raw_data['LONGITUDE'].shape[0]
n_records = raw_data['LONGITUDE'].size
data = {}
metadata = {}
idx_nodes = np.arange(n_lines).repeat(n_node_per_line)
ascat_time = shortcdstime2jd(raw_data['UTC_LINE_NODES'].flatten()['day'],
raw_data['UTC_LINE_NODES'].flatten()['time'])
data['jd'] = ascat_time[idx_nodes]
metadata['spacecraft_id'] = np.int8(mphr['SPACECRAFT_ID'][-1])
metadata['orbit_start'] = np.uint32(mphr['ORBIT_START'])
fields = ['processor_major_version', 'processor_minor_version',
'format_major_version', 'format_minor_version']
for f in fields:
metadata[f] = np.int16(mphr[f.upper()])
fields = ['degraded_inst_mdr', 'degraded_proc_mdr', 'sat_track_azi',
'abs_line_number']
for f in fields:
data[f] = raw_data[f.upper()].flatten()[idx_nodes]
fields = [('longitude', long_nan), ('latitude', long_nan),
('swath indicator', byte_nan)]
for f, nan_val in fields:
data[f] = raw_data[f.upper()].flatten()
valid = raw_unscaled[f.upper()].flatten() != nan_val
data[f][~valid] = nan_val
fields = [('sigma0_trip', long_nan),
('inc_angle_trip', uint_nan),
('azi_angle_trip', int_nan),
('kp', uint_nan),
('num_val_trip', ulong_nan),
('f_kp', byte_nan),
('f_usable', byte_nan),
('f_f', uint_nan),
('f_v', uint_nan),
('f_oa', uint_nan),
('f_sa', uint_nan),
('f_tel', uint_nan),
('f_ref', uint_nan),
('f_land', uint_nan)]
for f, nan_val in fields:
data[f] = raw_data[f.upper()].reshape(n_records, 3)
valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val
data[f][~valid] = nan_val
# modify longitudes from (0, 360) to (-180,180)
mask = np.logical_and(data['longitude'] != long_nan,
data['longitude'] > 180)
data['longitude'][mask] += -360.
# modify azimuth from (-180, 180) to (0, 360)
mask = (data['azi_angle_trip'] != int_nan) & (data['azi_angle_trip'] < 0)
data['azi_angle_trip'][mask] += 360
data['node_num'] = np.tile((np.arange(n_node_per_line) + 1),
n_lines).astype(np.uint8)
data['line_num'] = idx_nodes.astype(np.uint16)
data['as_des_pass'] = (data['sat_track_azi'] < 270).astype(np.uint8)
data['swath_indicator'] = data.pop('swath indicator')
return data, metadata
def read_szf_fmv_12(eps_file):
"""
Read SZF format version 12.
beam_num
- 1 Left Fore Antenna
- 2 Left Mid Antenna
- 3 Left Aft Antenna
- 4 Right Fore Antenna
- 5 Right Mid Antenna
- 6 Right Aft Antenna
as_des_pass
- 0 Ascending
- 1 Descending
swath_indicator
- 0 Left
- 1 Right
Parameters
----------
eps_file : EPSProduct object
EPS Product object.
Returns
-------
data : numpy.ndarray
SZF data.
"""
data = {}
metadata = {}
n_lines = eps_file.mdr_counter
n_node_per_line = eps_file.mdr['LONGITUDE_FULL'].shape[1]
idx_nodes = np.arange(n_lines).repeat(n_node_per_line)
# extract metadata
metadata['spacecraft_id'] = np.int8(eps_file.mphr['SPACECRAFT_ID'][-1])
metadata['orbit_start'] = np.uint32(eps_file.mphr['ORBIT_START'])
fields = ['processor_major_version', 'processor_minor_version',
'format_major_version', 'format_minor_version']
for f in fields:
metadata[f] = np.int16(eps_file.mphr[f.upper()])
# extract time
dt = np.datetime64('2000-01-01') + eps_file.mdr[
'UTC_LOCALISATION']['day'].astype('timedelta64[D]') + eps_file.mdr[
'UTC_LOCALISATION']['time'].astype('timedelta64[ms]')
data['time'] = dt[idx_nodes]
fields = ['degraded_inst_mdr', 'degraded_proc_mdr', 'sat_track_azi',
'beam_number', 'flagfield_rf1', 'flagfield_rf2',
'flagfield_pl', 'flagfield_gen1']
# extract data
for f in fields:
if eps_file.mdr_sfactor[f.upper()] == 1:
data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]
else:
data[f] = (eps_file.mdr[f.upper()].flatten() *
1./eps_file.mdr_sfactor[f.upper()])[idx_nodes]
data['swath_indicator'] = (data[
'beam_number'].flatten() > 3).astype(np.uint8)
data['as_des_pass'] = (data['sat_track_azi'] < 270).astype(np.uint8)
fields = [('longitude_full', long_nan),
('latitude_full', long_nan),
('sigma0_full', long_nan),
('inc_angle_full', uint_nan),
('azi_angle_full', int_nan),
('land_frac', uint_nan),
('flagfield_gen2', byte_nan)]
for f, nan_val in fields:
data[f] = eps_file.mdr[f.upper()].flatten()
invalid = eps_file.mdr[f.upper()].flatten() == nan_val
if eps_file.mdr_sfactor[f.upper()] != 1:
data[f] = data[f] * 1./eps_file.mdr_sfactor[f.upper()]
data[f][invalid] = nan_val
# modify longitudes from (0, 360) to (-180, 180)
mask = np.logical_and(data['longitude_full'] != long_nan,
data['longitude_full'] > 180)
data['longitude_full'][mask] += -360.
# modify azimuth from (-180, 180) to (0, 360)
idx = (data['azi_angle_full'] != int_nan) & (data['azi_angle_full'] < 0)
data['azi_angle_full'][idx] += 360
# set flags
data['f_usable'] = set_flags(data)
return data, metadata
def read_smx_fmv_12(eps_file):
"""
Read SMO/SMR format version 12.
Parameters
----------
eps_file : EPSProduct object
EPS Product object.
Returns
-------
data : numpy.ndarray
SMO/SMR data.
"""
raw_data = eps_file.scaled_mdr
raw_unscaled = eps_file.mdr
n_node_per_line = raw_data['LONGITUDE'].shape[1]
n_lines = raw_data['LONGITUDE'].shape[0]
n_records = eps_file.mdr_counter * n_node_per_line
idx_nodes = np.arange(eps_file.mdr_counter).repeat(n_node_per_line)
data = {}
metadata = {}
metadata['spacecraft_id'] = np.int8(eps_file.mphr['SPACECRAFT_ID'][-1])
metadata['orbit_start'] = np.uint32(eps_file.mphr['ORBIT_START'])
ascat_time = shortcdstime2jd(raw_data['UTC_LINE_NODES'].flatten()['day'],
raw_data['UTC_LINE_NODES'].flatten()['time'])
data['jd'] = ascat_time[idx_nodes]
fields = [('sigma0_trip', long_nan),
('inc_angle_trip', uint_nan),
('azi_angle_trip', int_nan),
('kp', uint_nan),
('f_land', uint_nan)]
for f, nan_val in fields:
data[f] = raw_data[f.upper()].reshape(n_records, 3)
valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val
data[f][~valid] = nan_val
fields = ['sat_track_azi', 'abs_line_number']
for f in fields:
data[f] = raw_data[f.upper()].flatten()[idx_nodes]
fields = [('longitude', long_nan, long_nan),
('latitude', long_nan, long_nan),
('swath_indicator', byte_nan, byte_nan),
('soil_moisture', uint_nan, uint_nan),
('soil_moisture_error', uint_nan, uint_nan),
('sigma40', long_nan, long_nan),
('sigma40_error', long_nan, long_nan),
('slope40', long_nan, long_nan),
('slope40_error', long_nan, long_nan),
('dry_backscatter', long_nan, long_nan),
('wet_backscatter', long_nan, long_nan),
('mean_surf_soil_moisture', uint_nan, uint_nan),
('soil_moisture_sensetivity', ulong_nan, float32_nan),
('correction_flags', uint8_nan, uint8_nan),
('processing_flags', uint8_nan, uint8_nan),
('aggregated_quality_flag', uint8_nan, uint8_nan),
('snow_cover_probability', uint8_nan, uint8_nan),
('frozen_soil_probability', uint8_nan, uint8_nan),
('innudation_or_wetland', uint8_nan, uint8_nan),
('topographical_complexity', uint8_nan, uint8_nan)]
for f, nan_val, new_nan_val in fields:
data[f] = raw_data[f.upper()].flatten()
valid = raw_unscaled[f.upper()].flatten() != nan_val
data[f][~valid] = new_nan_val
# sat_track_azi (uint)
data['as_des_pass'] = \
np.array(raw_data['SAT_TRACK_AZI'].flatten()[idx_nodes] < 270)
# modify longitudes from [0,360] to [-180,180]
mask = np.logical_and(data['longitude'] != long_nan,
data['longitude'] > 180)
data['longitude'][mask] += -360.
# modify azimuth from (-180, 180) to (0, 360)
mask = (data['azi_angle_trip'] != int_nan) & (data['azi_angle_trip'] < 0)
data['azi_angle_trip'][mask] += 360
fields = ['param_db_version', 'warp_nrt_version']
for f in fields:
        data[f] = raw_data[f.upper()].flatten()[idx_nodes]
metadata['spacecraft_id'] = int(eps_file.mphr['SPACECRAFT_ID'][2])
data['node_num'] = np.tile((np.arange(n_node_per_line) + 1), n_lines)
data['line_num'] = idx_nodes
return data, metadata
def shortcdstime2jd(days, milliseconds):
"""
Convert cds time to julian date.
Parameters
----------
days : int
Days since 2000-01-01
milliseconds : int
Milliseconds.
Returns
-------
jd : float
Julian date.
"""
offset = days + (milliseconds / 1000.) / (24. * 60. * 60.)
return julian_epoch + offset
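# Worked example (illustration only): 43200000 ms is 12 hours, i.e. half a
# day, so shortcdstime2jd(10, 43200000) evaluates to julian_epoch + 10.5,
# where julian_epoch is presumably the Julian date of 2000-01-01 (cf. the
# "days since 2000-01-01" parameter above).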
def set_flags(data):
"""
Compute summary flag for each measurement with a value of 0, 1 or 2
indicating nominal, slightly degraded or severely degraded data.
The format of ASCAT products is defined by
"EPS programme generic product format specification" (EPS.GGS.SPE.96167)
and "ASCAT level 1 product format specification" (EPS.MIS.SPE.97233).
bit name category description
------------------------------------
flagfield_rf1
0 fnoise amber noise missing, interpolated noise value used instead
1 fpgp amber degraded power gain product
2 vpgp red very degraded power gain product
3 fhrx amber degraded filter shape
4 vhrx red very degraded filter shape
flagfield_rf2
0 pgp_ool red power gain product is outside limits
1 noise_ool red measured noise value is outside limits
flagfield_pl
0 forb red orbit height is outside limits
1 fatt red no yaw steering
2 fcfg red unexpected instrument configuration
3 fman red satellite maneuver
4 fosv warning osv file missing (fman may be incorrect)
flagfield_gen1
0 ftel warning telemetry missing (ftool may be incorrect)
1 ftool red telemetry out of limits
flagfield_gen2
0 fsol amber possible interference from solar array
1 fland warning lat/long position is over land
2 fgeo red geolocation algorithm failed
    Each flag belongs to a particular category which indicates the impact
on data quality. Flags in the "amber" category indicate that the data is
slightly degraded but still usable. Flags in the "red" category indicate
that the data is severely degraded and should be discarded or
used with caution.
A simple algorithm for calculating a single summary flag with a value of
0, 1 or 2 indicating nominal, slightly degraded or severely degraded is
function calc_status( flags )
status = 0
if any amber flags are set then status = 1
if any red flags are set then status = 2
return status
Parameters
----------
data : numpy.ndarray
SZF data.
Returns
-------
f_usable : numpy.ndarray
Flag indicating nominal (0), slightly degraded (1) or
severely degraded(2).
"""
flag_status_bit = {'flagfield_rf1': np.array([1, 1, 2, 1, 2, 0, 0, 0]),
'flagfield_rf2': np.array([2, 2, 0, 0, 0, 0, 0, 0]),
'flagfield_pl': np.array([2, 2, 2, 2, 0, 0, 0, 0]),
'flagfield_gen1': np.array([0, 2, 0, 0, 0, 0, 0, 0]),
'flagfield_gen2': np.array([1, 0, 2, 0, 0, 0, 0, 0])}
f_usable = np.zeros(data['flagfield_rf1'].size, dtype=np.uint8)
for flagfield, bitmask in flag_status_bit.items():
subset = np.nonzero(data[flagfield])[0]
if subset.size > 0:
unpacked_bits = np.fliplr(np.unpackbits(
data[flagfield][subset]).reshape(-1, 8).astype(np.bool))
flag = np.ma.array(
np.tile(bitmask, unpacked_bits.shape[0]).reshape(-1, 8),
mask=~unpacked_bits, fill_value=0)
f_usable[subset] = np.max(np.vstack(
(f_usable[subset], flag.filled().max(axis=1))), axis=0)
return f_usable
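# Illustration (assumed, based on the bit tables in the docstring above): a
# flagfield_rf1 value of 4 has only bit 2 (vpgp, red) set, so the
# corresponding measurement gets f_usable = 2; a value of 2 (bit 1, fpgp,
# amber) would yield f_usable = 1. np.unpackbits() returns bits MSB-first,
# which is why they are flipped with np.fliplr() before being matched against
# the per-bit status masks.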
|
"""
Post Class
for lap_joint script
Post:
.id int Post identifier
.brep Brep Brep representing Post
.profile Curve Curve defining end profile of post
.axis Line Line between centers of end faces
.origin Point start of axis Line
.orientation Plane plane with normal along axis and x-axis towards
center of one face
.pockets list list of Pockets on this Post
.isConnected Bool true if this post is part of a joint
.selfToGlobal Transform convert local coordinates to global
.globalToSelf Transform convert global coordinates to local
.millToGlobal Transform convert unrotated mill coordinates to global
"""
import Rhino
import scriptcontext as sc
import rhinoscriptsyntax as rs
import math
import common
from toolpath import *
class Post:
"""A single post in the system."""
def __init__(self, axis=None, obRef=None, roll=None, group=None, width=None, height=None, id=None):
"""Initialize a Post.
Gathers all information about this Post
Offers multiple ways to describe a Post:
Start with Rhino object:
obRef: reference to a Rhino object
Start with lines:
group: obRef of one object in a group
OR
axis: central axis of the post
roll: (optional), line normal to axis, defines roll of post
For a rectangular Post:
width: width along roll axis
height: other short dimension of Post
"""
self.isConnected = False
self.brep = None
#not sure about this. id is None until assigned by the Structure?
self.id = id
if group: #creating Post with axis and roll lines grouped together
#find group this object belongs to
groups = group.Object().Attributes.GetGroupList()
if len(groups) < 1:
raise NameError("Object does not belong to a group.")
group_id = groups[0]
#get all objects in the group
objects = rs.ObjectsByGroup(sc.doc.Groups.GroupName(group_id))
if len(objects) != 2:
raise NameError("Group does not have two objects (axis, roll).")
#get actual curves
curves = [sc.doc.Objects.Find(ob).CurveGeometry for ob in objects]
#convert to lines
lines = [Rhino.Geometry.Line(c.PointAtStart, c.PointAtEnd) for c in curves]
#roll is shorter than axis
roll, axis = sorted(lines, key=lambda l: l.Length)
if axis: #creating Post based on lines
if not (width and height): #currently only rectangular solids.
raise NameError("Height and width required if an object is not given.")
if type(axis) is Rhino.DocObjects.ObjRef: #axis is objref to a curve
#find actual curve geometry
axis = axis.Geometry()
                if isinstance(axis, Rhino.Geometry.Curve):
self.axis = Rhino.Geometry.Line(axis.PointAtStart, axis.PointAtEnd)
else: #assume for now axis is either curve or internal line object
self.axis = axis
if roll:
#if roll is a curve, convert it to a Line
                if isinstance(roll, Rhino.Geometry.Curve):
roll = Rhino.Geometry.Line(roll.PointAtStart, roll.PointAtEnd)
self.orientation = rs.PlaneFromNormal(self.axis.From,
self.axis.UnitTangent, roll.UnitTangent)
else:
#construct orientation with default roll angle
self.orientation = rs.PlaneFromNormal(self.axis.From,
self.axis.UnitTangent)
#construct rectangular profile curve
self.profile = self.makeRectProfile(width, height)
elif obRef: #no axis, need obRef
object = obRef.Object()
if object is None:
raise NameError("No object found corresponding to reference " + str(obRef))
#actual object geometry
self.brep = common.getBrep(object)
#assume smallest faces are the ends of the Post
endFaces = sorted(self.brep.Faces, key=rs.SurfaceArea)[0:2]
#get curve defining post profile
            self.profile = Rhino.Geometry.Curve.JoinCurves(endFaces[0].DuplicateFace(False).DuplicateEdgeCurves())[0]
#axis is a Line between centers of smallest faces.
self.axis = Rhino.Geometry.Line(
*[rs.SurfaceAreaCentroid(face)[0] for face in endFaces])
else : #no axis and no obRef
raise NameError('No valid axis or obRef given.')
#just for convenience and simplicity
self.origin = self.axis.From
#get orientation of Post
self.orientation = self.findOrientation()
#store conversions to and from Post's orientation
#rotate 90 degrees about y axis to align posts with x instead of z axis
self.globalToSelf = Rhino.Geometry.Transform.Rotation(1,0,
Rhino.Geometry.Vector3d.YAxis, Rhino.Geometry.Point3d.Origin)
#transform global coordinates to post's local coordinates
self.globalToSelf *= Rhino.Geometry.Transform.ChangeBasis(
Rhino.Geometry.Plane.WorldXY, self.orientation)
#go the other way
self.selfToGlobal = self.globalToSelf.TryGetInverse()[1]
#initialize list of this Post's Pockets
self.pockets = []
###########
#Post Class Functions
def info(self):
"""Displays a text summary of this post."""
print "Post: " + self.printId() + \
"\n Length: " + str(round(self.axis.Length, 2)) + \
"\n Origin: " + common.printPoint3d(self.origin) + \
"\n----"
def display(self, objects=None):
"""Create objects in viewport to display information about this post.
'objects' determines which objects to display
Creates:
label text dot with post id
orientation aligned plane with corner on post origin
profile profile curve
object post object, if not using obrefs
axis axis Line
Returns: list of guids of added objects
"""
guids = []
if objects == None:
objects = ['label', 'orientation']
if 'label' in objects:
guids.append(rs.AddTextDot(self.printId(), self.origin))
if 'orientation' in objects:
guids.append(common.displayPlane(self.orientation))
if 'profile' in objects:
guids.append(sc.doc.Objects.AddCurve(self.profile))
if 'object' in objects:
if not self.brep:
vector = Rhino.Geometry.Vector3d(self.axis.To - self.axis.From)
guids.append(sc.doc.Objects.AddBrep(
Rhino.Geometry.Surface.CreateExtrusion(self.profile, vector).ToBrep()))
rs.CapPlanarHoles(guids[-1])
if 'axis' in objects:
guids.append(sc.doc.Objects.AddLine(self.axis))
if 'xAxis' in objects:
guids.append(sc.doc.Objects.AddLine(self.origin, self.origin + self.orientation.XAxis))
return guids
def printId(self):
"""return id with type letter"""
return 'p' + str(self.id)
def findOrientation(self):
"""Find the orientation (direction and roll) of a post.
Returns: plane with normal along axis and x-axis towards center of one face.
"""
#grab one edge of profile arbitrarily
if type(self.profile) is Rhino.Geometry.PolyCurve:
one_edge = self.profile.Explode()[0]
else:
raise NameError("Profile is wrong type of curve: " + str(type(self.profile)))
middle_of_edge = one_edge.PointAtNormalizedLength(.5)
#create plane from origin, normal vector, and x-axis vector
return rs.PlaneFromNormal(self.origin,
self.axis.UnitTangent,
rs.VectorCreate(self.origin, middle_of_edge))
def makeRoll(self):
"""Construct a default horizontal roll angle"""
#get plane normal to axis at arbitrary rotation
plane = rs.PlaneFromNormal(self.axis.From, self.axis.UnitTangent)
#set roll to horizontal component of x axis
roll = Rhino.Geometry.Vector3d(plane.XAxis.X, plane.XAxis.Y, 0)
if roll.IsZero:
roll = plane.YAxis
        return Rhino.Geometry.Line(self.axis.From, roll)
def makeRectProfile(self, width, height):
"""create a Post profile using the Post's orientation
Returns: rectangular PolyCurve boundary
"""
#get corner uv coordinates
corners = [[width/2, height/2], [width/2, -height/2],
[-width/2, -height/2], [-width/2, height/2]]
#close curve
corners.append(corners[0])
#convert local uvs to global points
points = [self.orientation.PointAt(c[0], c[1]) for c in corners]
#create polylinecurve
polyline = Rhino.Geometry.Polyline(points)
#get list of edge curves
curves = [Rhino.Geometry.LineCurve(line) for line in polyline.GetSegments()]
#join as polycurve
return Rhino.Geometry.Curve.JoinCurves(curves)[0]
def makeGcode(self, gcode=False):
"""Convert mill paths of each pocket into Gcode for the entire Post
Returns: gcode string for milling post
"""
if not gcode:
gcode = common.Gcode()
gcode.text += common.settings.gcode['preamble'] + "\n"
gcode.text += "(Starting Post {0})\n".format(self.printId())
for p in self.pockets:
p.makeGcode(gcode=gcode)
#get coordinates of home point
home = str(common.settings.gcode['home']).split(',')
home = [round(float(x), common.settings.gcode['precision']) for x in home]
#return to home point when finished
Rapid(Rhino.Geometry.Point3d(*home[0:3]), A=home[3], clear=True).makeGcode(gcode=gcode)
return gcode
# End Post Class # |
# import socket programming library
import socket
import sys
from packet_parser import Parser
# import thread module
from _thread import *
import threading
parser = Parser()
print_lock = threading.Lock()
stateOfApllication = True
# thread function
def threaded(c):
while True:
# data received from client
data = c.recv(1024).decode()
if not data:
#print('Shutting program')
#parser.receive_message('bye:bye')
print_lock.release()
#stateOfApllication = False
break
if (str(data) == 'bye:bye'):
print('Shutting program')
parser.receive_message('bye:bye')
sys.exit()
#data = data[::-1]
print(str(data))
parser.receive_message(str(data))
# connection closed
c.close()
def Main():
host = ""
    # reserve a port on your computer
    # in our case it is 8888 but it
    # can be anything
port = 8888
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
print("socket binded to port", port)
# put the socket into listening mode
s.listen(5)
print("socket is listening")
# a forever loop until client wants to exit
while True:
# establish connection with client
c, addr = s.accept()
# lock acquired by client
print_lock.acquire()
print('Connected to :', addr[0], ':', addr[1])
# Start a new thread and return its identifier
start_new_thread(threaded, (c,))
s.close()
if __name__ == '__main__':
Main()
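# A matching client sketch (illustrative only; host and port assumed to be the
# ones used above). Sending 'bye:bye' triggers the shutdown branch in
# threaded().
#
#     import socket
#     c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     c.connect(('127.0.0.1', 8888))
#     c.sendall('some:message'.encode())
#     c.sendall('bye:bye'.encode())
#     c.close()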
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides a processor that adds interactions from blocks to molecules.
"""
# TODO: Move all this functionality to do_mapping?
from collections import ChainMap
from itertools import product
from .processor import Processor
from ..graph_utils import make_residue_graph
from ..molecule import Molecule
def apply_blocks(molecule, blocks):
"""
Generate a new :class:`~vermouth.molecule.Molecule` based on the residue
names and other attributes of `molecule` from `blocks`.
Parameters
----------
molecule: vermouth.molecule.Molecule
The molecule to process.
blocks: dict[str, vermouth.molecule.Block]
The blocks known.
Returns
-------
vermouth.molecule.Molecule
A new molecule with attributes from the old `molecule`, as well as all
interactions described by `blocks`.
"""
graph_out = Molecule(
force_field=molecule.force_field,
meta=molecule.meta.copy()
)
residue_graph = make_residue_graph(molecule)
# nrexcl may not be defined, but if it is we probably want to keep it
try:
graph_out.nrexcl = molecule.nrexcl
except AttributeError:
graph_out.nrexcl = None
old_to_new_idxs = {}
at_idx = 0
charge_group_offset = 0
for res_idx in residue_graph:
residue = residue_graph.nodes[res_idx]
res_graph = residue['graph']
resname = residue['resname']
block = blocks[resname]
atname_to_idx = {}
if graph_out.nrexcl is None:
if hasattr(block, 'nrexcl'):
graph_out.nrexcl = block.nrexcl
else:
if (hasattr(block, 'nrexcl')
and block.nrexcl is not None
and block.nrexcl != graph_out.nrexcl):
raise ValueError('Not all blocks share the same value for "nrexcl".')
for block_idx in block:
atname = block.nodes[block_idx]['atomname']
atom = list(res_graph.find_atoms(atomname=atname))
assert len(atom) == 1, (block.name, atname, atom)
old_to_new_idxs[atom[0]] = at_idx
atname_to_idx[atname] = at_idx
attrs = molecule.nodes[atom[0]]
graph_out.add_node(at_idx, **ChainMap(block.nodes[atname], attrs))
graph_out.nodes[at_idx]['graph'] = molecule.subgraph(atom)
graph_out.nodes[at_idx]['charge_group'] += charge_group_offset
graph_out.nodes[at_idx]['resid'] = attrs['resid']
at_idx += 1
charge_group_offset = graph_out.nodes[at_idx - 1]['charge_group']
for idx, jdx, data in block.edges(data=True):
idx = atname_to_idx[idx]
jdx = atname_to_idx[jdx]
graph_out.add_edge(idx, jdx, **data)
for inter_type, interactions in block.interactions.items():
for interaction in interactions:
atom_idxs = []
for atom_name in interaction.atoms:
atom_index = graph_out.find_atoms(atomname=atom_name,
resname=residue['resname'],
resid=residue['resid'])
atom_index = list(atom_index)
if not atom_index:
                        msg = ('Could not find an atom named "{}" '
'with resname being "{}" '
'and resid being "{}".')
raise ValueError(msg.format(atom_name, residue['resname'], residue['resid']))
atom_idxs.extend(atom_index)
interactions = interaction._replace(atoms=atom_idxs)
graph_out.add_interaction(inter_type, *interactions)
# This makes edges between residues. We need to do this, since they can't
# come from the blocks and we need them to find the links locations.
# TODO This should not be done here, but by do_mapping, which might *also*
# do it at the moment
for res_idx, res_jdx in residue_graph.edges():
for old_idx, old_jdx in product(residue_graph.nodes[res_idx]['graph'],
residue_graph.nodes[res_jdx]['graph']):
try:
# Usually termini, PTMs, etc
idx = old_to_new_idxs[old_idx]
jdx = old_to_new_idxs[old_jdx]
except KeyError:
continue
if molecule.has_edge(old_idx, old_jdx):
graph_out.add_edge(idx, jdx)
return graph_out
class ApplyBlocks(Processor):
def run_molecule(self, molecule):
return apply_blocks(molecule, molecule.force_field.blocks)
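# Usage sketch (an assumption, not taken from the source): as with other
# vermouth processors, ApplyBlocks can be applied to a single molecule whose
# force field provides the blocks referenced by its residue names.
#
#     processed = ApplyBlocks().run_molecule(molecule)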
|
from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
profile_photo = models.ImageField(upload_to = 'images/',blank=True)
bio = models.TextField(max_length = 50,null = True)
user = models.OneToOneField(User,on_delete=models.CASCADE)
def __str__(self):
        return self.bio
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
class Project(models.Model):
image = models.ImageField(upload_to = 'images/')
project_name = models.CharField(max_length =10)
project_url = models.CharField(max_length =50)
description =models.CharField(max_length =100)
user= models.ForeignKey(User,on_delete=models.CASCADE)
def __str__(self):
        return self.project_name
def save_project(self):
self.save()
def delete_project(self):
self.delete()
# START OF THE CLASSES FOR RATING
class DesignRating(models.Model):
RATING_CHOICES = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6,'6'),
(7,'7'),
(8,'8'),
(9,'9'),
(10,'10')
)
    project = models.ForeignKey('Project', on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
rating = models.IntegerField(choices=RATING_CHOICES, null=True)
def __str__(self):
        return str(self.rating)
def save_designrating(self):
self.save()
def delete_designrating(self):
self.delete()
class UsabilityRating(models.Model):
RATING_CHOICES = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6,'6'),
(7,'7'),
(8,'8'),
(9,'9'),
(10,'10')
)
    project = models.ForeignKey('Project', on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
rating = models.IntegerField(choices=RATING_CHOICES, null=True)
def __str__(self):
        return str(self.rating)
def save_usabilityrating(self):
self.save()
def delete_usabilityrating(self):
self.delete()
class ContentRating(models.Model):
RATING_CHOICES = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6,'6'),
(7,'7'),
(8,'8'),
(9,'9'),
(10,'10')
)
    project = models.ForeignKey('Project', on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
rating = models.IntegerField(choices=RATING_CHOICES, null=True)
def __str__(self):
        return str(self.rating)
def save_contentrating(self):
self.save()
def delete_contentrating(self):
self.delete()
|
import datetime
import pytest
from django.core.management import call_command
from dashboard.testing import BuilderTestCase
from apps.flowers.builder import direct_wholesale_04
from apps.dailytrans.models import DailyTran
from apps.flowers.models import Flower
from apps.configs.models import Source
from django.db.models import Q
@pytest.mark.secret
class BuilderTestCase(BuilderTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# load fixtures
call_command('loaddata', 'configs.yaml', verbosity=0)
call_command('loaddata', 'sources.yaml', verbosity=0)
call_command('loaddata', 'cog04.yaml', verbosity=0)
cls.start_date = datetime.date(year=2018, month=3, day=6)
cls.end_date = datetime.date(year=2018, month=3, day=8)
def test_direct_single(self):
result = direct_wholesale_04(start_date=self.start_date, end_date=self.end_date)
self.assertTrue(result.success)
obj = Flower.objects.filter(code='L', track_item=True).first()
self.assertIsNotNone(obj)
sources = Source.objects.filter(Q(name__exact='臺北花市') | Q(name__exact='臺中市場'))
qs = DailyTran.objects.filter(product=obj,
source__in=sources,
date__range=(self.start_date, self.end_date))
self.assertEqual(qs.count(), 6)
|
import panscore
def load(filename:str)->panscore.Score:
    # Open the file and return a panscore.Score object
    # The encoding is uncertain, so read the file in binary mode first
with open(filename,'rb') as f:
file=f.read()
    # Detect the encoding
if(b"Charset=UTF-8" in file):
encoding="utf-8"
else:
encoding="shift-JIS"
    # Split the file into blocks
blocks=[]
block=[]
for line in file.split(b"\n"):
line=line.strip(b"\r")
        # Decode line by line
try:
linestr=str(line,encoding=encoding)
except UnicodeDecodeError:
            # If this line's encoding differs from the others, try several encodings
for i in ["gbk","utf-8","shift-JIS"]:
try:
linestr=str(line,encoding=i)
break
except UnicodeDecodeError:
pass
else:
linestr=""
if(linestr.startswith("[")):
blocks.append(block)
block=[]
block.append(linestr)
    # Read the file header
"""
fileproperties={}
for line in blocks[2]:
if("=" in line):
[key,value]=line.split("=")
if(value!=""):
fileproperties[key]=ustvaluetyper(key,value)
tempo=fileproperties.pop("Tempo",120.0)
"""
    # Read the notes
notes=[]
time=0
for block in blocks[3:]:
noteproperties={}
length=0
notenum=60
lyric="R"
for line in block:
if("=" in line):
[key,value]=line.split("=")
if(key=="Length"):
length=int(value)
elif(key=="NoteNum"):
notenum=int(value)
elif(key=="Lyric"):
lyric=value.strip(" \n")
if(not (lyric in {"R","r"})):
notes.append(panscore.Note(start=time,
length=length,
notenum=notenum,
lyric=lyric))
time+=length
return panscore.Score(track=[panscore.Track(note=notes)])
#TODO
pass
def save(score:panscore.Score,filename:str,track:int=0):
    # Save a panscore.Score object to a file
s='[#VERSION]\nUST Version1.2\nCharset=UTF-8\n[#SETTING]\n'
    noteindex=0  # note index
time=0
    def dumpnote(length:int,notenum:int,lyric:str)->str:
return "[#{:0>4}]\nLength={}\nNoteNum={}\nLyric={}\n".format(noteindex,length,notenum,lyric)
tr=score.track[track]
for note in tr.note:
if(note.start>time):
            s+=dumpnote(note.start-time,60,"R")  # rest
noteindex+=1
s+=dumpnote(note.length,note.notenum,note.lyric)
noteindex+=1
time=note.start+note.length
s+="[#TRACKEND]\n"
with open(filename,"w",encoding="utf8") as file:
file.write(s)
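# Round-trip sketch (the filenames are hypothetical): load() builds a
# panscore.Score from a UST file and save() writes track 0 back out.
#
#     score = load('song.ust')
#     save(score, 'song_copy.ust', track=0)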
|
# Alphabetical order
# TODO add method and class definition modules
from .bayesianEstimator import BayesianEstimator
from .errorEstimator import ErrorEstimator
from .estimationAssembler import EstimationAssembler
from .hierarchyOptimiser import HierarchyOptimiser
from .modelEstimator import ModelEstimator
from .momentEstimator import MomentEstimator
from .monoCriterion import MonoCriterion
from .monteCarloIndex import MonteCarloIndex
from .monteCarloSampler import MonteCarloSampler
from .multiCriterion import MultiCriterion
from .methodDefs_multiCriterion.flag import *
from .randomGeneratorWrapper import RandomGeneratorWrapper
from .sampleGenerator import SampleGenerator
from .solverWrapper import SolverWrapper
from .statisticalEstimator import StatisticalEstimator
from .tools import *
from .xmcAlgorithm import XMCAlgorithm
|
import cioppy
import geopandas as gp
import os
import pandas as pd
from py_snap_helpers import *
from shapely.geometry import box
from snappy import jpy
from snappy import ProductIO
import gdal
import osr
import ogr
import json
import sys
sys.path.append('/opt/OTB/lib/python')
sys.path.append('/opt/OTB/lib/libfftw3.so.3')
sys.path.append('/opt/anaconda/bin')
os.environ['OTB_APPLICATION_PATH'] = '/opt/OTB/lib/otb/applications'
os.environ['LD_LIBRARY_PATH'] = '/opt/OTB/lib'
os.environ['ITK_AUTOLOAD_PATH'] = '/opt/OTB/lib/otb/applications'
import otbApplication
from gdal_calc import Calc as gdalCalc
def get_metadata(input_references, data_path):
ciop = cioppy.Cioppy()
if isinstance(input_references, str):
search_params = dict()
search_params['do'] = 'terradue'
products = gp.GeoDataFrame(ciop.search(end_point=input_references,
params=search_params,
output_fields='identifier,self,wkt,startdate,enddate,enclosure,orbitDirection,track,orbitNumber',
model='EOP'))
else:
temp_results = []
for index, self in enumerate(input_references):
search_params = dict()
search_params['do'] = 'terradue'
temp_results.append(ciop.search(end_point=self,
params=search_params,
output_fields='identifier,self,wkt,startdate,enddate,enclosure,orbitDirection,track,orbitNumber',
model='EOP')[0])
products = gp.GeoDataFrame(temp_results)
products = products.merge(products.apply(lambda row: analyse(row, data_path), axis=1),
left_index=True,
right_index=True)
return products
def analyse(row, data_path):
series = dict()
series['local_path'] = os.path.join(data_path, row['identifier'], row['identifier'] + '.SAFE', 'manifest.safe')
return pd.Series(series)
def group_analysis(df):
df['ordinal_type'] = 'NaN'
slave_date=df['startdate'].min()[:10]
master_date=df['startdate'].max()[:10]
for i in range(len(df)):
if slave_date == df.iloc[i]['startdate'][:10]:
df.loc[i,'ordinal_type']='slave'
elif master_date == df.iloc[i]['startdate'][:10]:
df.loc[i,'ordinal_type']='master'
return
def bbox_to_wkt(bbox):
return box(*[float(c) for c in bbox.split(',')]).wkt
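# Example (values are arbitrary): a 'min_lon,min_lat,max_lon,max_lat' string
# is turned into the WKT polygon of that bounding box via shapely's box().
#
#     bbox_to_wkt('10.0,40.0,11.0,41.0')  # -> WKT of the 10-11E / 40-41N box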
def pre_process(products, aoi, utm_zone, resolution='10.0', polarization=None, orbit_type=None, show_graph=False):
master_products=products[products['ordinal_type']=='master'].reset_index(drop=True)
slave_products=products[products['ordinal_type']=='slave'].reset_index(drop=True)
####Read and Assemble Masters
master_graph=GraphProcessor()
master_read_nodes = []
output_name_m='mst_' + master_products.iloc[0]['identifier'][:25]
for index, product in master_products.iterrows():
output_name_m += '_'+product['identifier'][-4:]
operator = 'Read'
parameters = get_operator_default_parameters(operator)
node_id = 'Read-M-{0}'.format(index)
source_node_id = ''
parameters['file'] = product.local_path
master_graph.add_node(node_id,
operator,
parameters,
source_node_id)
source_node_id_m = node_id
master_read_nodes.append(node_id)
if len(master_read_nodes)>1:
source_nodes_id = master_read_nodes
operator = 'SliceAssembly'
node_id = 'SliceAssembly-M'
parameters = get_operator_default_parameters(operator)
parameters['selectedPolarisations'] = polarization
master_graph.add_node(node_id,
operator,
parameters,
source_nodes_id)
source_node_id_m = node_id
###### Read and Assemble Slaves
slave_read_nodes = []
slave_graph = GraphProcessor()
output_name_s = 'slv_'+ slave_products.iloc[0]['identifier'][:25]
for index, product in slave_products.iterrows():
output_name_s += '_'+product['identifier'][-4:]
operator = 'Read'
parameters = get_operator_default_parameters(operator)
node_id = 'Read-S-{0}'.format(index)
source_node_id = ''
parameters['file'] = product.local_path
slave_graph.add_node(node_id,
operator,
parameters,
source_node_id)
source_node_id_s = node_id
slave_read_nodes.append(node_id)
if len(slave_read_nodes)>1:
source_nodes_id = slave_read_nodes
operator = 'SliceAssembly'
node_id = 'SliceAssembly-S'
parameters = get_operator_default_parameters(operator)
parameters['selectedPolarisations'] = polarization
slave_graph.add_node(node_id,
operator,
parameters,
source_nodes_id)
source_node_id_s = node_id
    ###### Continue pre-processing master & slave products in two separate graphs
operator = 'Subset'
parameters = get_operator_default_parameters(operator)
parameters['geoRegion'] = aoi
parameters['copyMetadata'] = 'true'
node_id = 'Subset-S'
slave_graph.add_node(node_id,
operator,
parameters,
source_node_id_s)
source_node_id_s = node_id
node_id = 'Subset-M'
master_graph.add_node(node_id,
operator,
parameters,
source_node_id_m)
source_node_id_m = node_id
operator = 'Apply-Orbit-File'
parameters = get_operator_default_parameters(operator)
if orbit_type == 'Restituted':
parameters['orbitType'] = 'Sentinel Restituted (Auto Download)'
node_id = 'Apply-Orbit-File-S'
slave_graph.add_node(node_id,
operator,
parameters,
source_node_id_s)
source_node_id_s = node_id
node_id = 'Apply-Orbit-File-M'
master_graph.add_node(node_id,
operator,
parameters,
source_node_id_m)
source_node_id_m = node_id
operator = 'Calibration'
parameters = get_operator_default_parameters(operator)
parameters['outputSigmaBand'] = 'true'
if polarization is not None:
parameters['selectedPolarisations'] = polarization
node_id = 'Calibration-S'
slave_graph.add_node(node_id,
operator,
parameters,
source_node_id_s)
source_node_id_s = node_id
node_id = 'Calibration-M'
master_graph.add_node(node_id,
operator,
parameters,
source_node_id_m)
source_node_id_m = node_id
operator = 'Terrain-Correction'
parameters = get_operator_default_parameters(operator)
map_proj = utm_zone
parameters['mapProjection'] = map_proj
parameters['pixelSpacingInMeter'] = resolution
parameters['nodataValueAtSea'] = 'false'
parameters['demName'] = 'SRTM 1Sec HGT'
node_id = 'Terrain-Correction-S'
slave_graph.add_node(node_id,
operator,
parameters,
source_node_id_s)
source_node_id_s = node_id
node_id = 'Terrain-Correction-M'
master_graph.add_node(node_id,
operator,
parameters,
source_node_id_m)
source_node_id_m = node_id
operator = 'Write'
parameters = get_operator_default_parameters(operator)
parameters['formatName'] = 'BEAM-DIMAP'
node_id = 'Write-S'
parameters['file'] = output_name_s
slave_graph.add_node(node_id,
operator,
parameters,
source_node_id_s)
node_id = 'Write-M'
parameters['file'] = output_name_m
master_graph.add_node(node_id,
operator,
parameters,
source_node_id_m)
if show_graph:
master_graph.view_graph()
slave_graph.view_graph()
master_graph.run()
slave_graph.run()
return [output_name_m,output_name_s]
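# Collocate the pre-processed products into a single stack
# (ProductSet-Reader -> CreateStack -> Write), saved as 'stack.dim'.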
def create_stack(products, show_graph=True):
mygraph = GraphProcessor()
operator = 'ProductSet-Reader'
parameters = get_operator_default_parameters(operator)
parameters['fileList'] = ','.join([ '{}.dim'.format(n) for n in products])
node_id = 'ProductSet-Reader'
source_node_id = ''
#parameters['file'] = product.local_path
mygraph.add_node(node_id,
operator,
parameters,
'')
source_node_id = node_id
operator = 'CreateStack'
parameters = get_operator_default_parameters(operator)
node_id = 'CreateStack'
parameters['extent'] = 'Minimum'
parameters['resamplingType'] = 'BICUBIC_INTERPOLATION'
mygraph.add_node(node_id,
operator,
parameters,
source_node_id)
source_node_id = node_id
operator = 'Write'
parameters = get_operator_default_parameters(operator)
parameters['file'] = 'stack'
parameters['formatName'] = 'BEAM-DIMAP'
node_id = 'Write'
mygraph.add_node(node_id,
operator,
parameters,
source_node_id)
if show_graph:
mygraph.view_graph()
mygraph.run()
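# Return the band names of a BEAM-DIMAP product using the SNAP ProductIO reader.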
def list_bands(product):
reader = ProductIO.getProductReader('BEAM-DIMAP')
product = reader.readProductNodes(product, None)
return list(product.getBandNames())
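# Apply a BandMaths expression to the input product and write the resulting
# 'change_detection' band as a BigTIFF GeoTIFF.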
def change_detection(input_product, output_product, expression, show_graph=False):
mygraph = GraphProcessor()
operator = 'Read'
parameters = get_operator_default_parameters(operator)
node_id = 'Read'
source_node_id = ''
parameters['file'] = input_product
mygraph.add_node(node_id, operator, parameters, source_node_id)
source_node_id = node_id
operator = 'BandMaths'
parameters = get_operator_default_parameters(operator)
bands = '''<targetBands>
<targetBand>
<name>change_detection</name>
<type>float32</type>
<expression>{}</expression>
<description/>
<unit/>
<noDataValue>NaN</noDataValue>
</targetBand>
</targetBands>'''.format(expression)
parameters['targetBandDescriptors'] = bands
node_id = 'BandMaths'
mygraph.add_node(node_id, operator, parameters, source_node_id)
source_node_id = node_id
operator = 'Write'
parameters = get_operator_default_parameters(operator)
parameters['file'] = output_product
parameters['formatName'] = 'GeoTIFF-BigTIFF'
node_id = 'Write'
mygraph.add_node(node_id,
operator,
parameters,
source_node_id)
if show_graph:
mygraph.view_graph()
mygraph.run()
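# Convert a BEAM-DIMAP product from linear backscatter to dB and write it as a
# BigTIFF GeoTIFF; returns the path of the generated *_db.tif file.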
def convert_dim(input_product, show_graph=False):
mygraph = GraphProcessor()
operator = 'Read'
parameters = get_operator_default_parameters(operator)
node_id = 'Read'
source_node_id = ''
parameters['file'] = input_product
mygraph.add_node(node_id, operator, parameters, source_node_id)
source_node_id = node_id
operator = 'LinearToFromdB'
node_id = 'LinearToFromdB'
parameters = get_operator_default_parameters(operator)
mygraph.add_node(node_id, operator, parameters, source_node_id)
source_node_id = node_id
operator = 'Write'
parameters = get_operator_default_parameters(operator)
parameters['file'] = input_product.replace('.dim', '_db.tif')
parameters['formatName'] = 'GeoTIFF-BigTIFF'
node_id = 'Write'
mygraph.add_node(node_id, operator, parameters, source_node_id)
if show_graph:
mygraph.view_graph()
mygraph.run()
return input_product.replace('.dim', '_db.tif')
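# Produce a cloud-optimized GeoTIFF: build internal overviews, then translate
# with tiling and LZW compression; the intermediate input file is removed.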
def cog(input_tif, output_tif):
translate_options = gdal.TranslateOptions(gdal.ParseCommandLine('-co TILED=YES ' \
'-co COPY_SRC_OVERVIEWS=YES ' \
' -co COMPRESS=LZW'))
ds = gdal.Open(input_tif, gdal.OF_READONLY)
gdal.SetConfigOption('COMPRESS_OVERVIEW', 'DEFLATE')
ds.BuildOverviews('NEAREST', [2,4,8,16,32])
ds = None
ds = gdal.Open(input_tif)
gdal.Translate(output_tif,
ds,
options=translate_options)
ds = None
os.remove('{}.ovr'.format(input_tif))
os.remove(input_tif)
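# Return the WKT footprint of a raster, reprojected to EPSG:4326.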
def get_image_wkt(product):
src = gdal.Open(product)
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
max_x = ulx + (src.RasterXSize * xres)
min_y = uly + (src.RasterYSize * yres)
min_x = ulx
max_y = uly
source = osr.SpatialReference()
source.ImportFromWkt(src.GetProjection())
target = osr.SpatialReference()
target.ImportFromEPSG(4326)
transform = osr.CoordinateTransformation(source, target)
result_wkt = box(transform.TransformPoint(min_x, min_y)[0],
transform.TransformPoint(min_x, min_y)[1],
transform.TransformPoint(max_x, max_y)[0],
transform.TransformPoint(max_x, max_y)[1]).wkt
return result_wkt
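# Vectorize a raster band into polygons (GeoJSON via gdal.Polygonize) and
# return them as a GeoDataFrame in the given EPSG coordinate system.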
def polygonize(input_tif, band, epsg):
epsg_code = epsg.split(':')[1]
srs = osr.SpatialReference()
srs.ImportFromEPSG(int(epsg_code))
source_raster = gdal.Open(input_tif)
band = source_raster.GetRasterBand(band)
band_array = band.ReadAsArray()
out_vector_file = "polygonized.json"
driver = ogr.GetDriverByName('GeoJSON')
    out_data_source = driver.CreateDataSource(out_vector_file)
out_layer = out_data_source.CreateLayer(out_vector_file, srs=srs)
new_field = ogr.FieldDefn('change_detection', ogr.OFTInteger)
out_layer.CreateField(new_field)
gdal.Polygonize(band, None, out_layer, 0, [], callback=None )
out_data_source = None
source_raster = None
data = json.loads(open(out_vector_file).read())
gdf = gp.GeoDataFrame.from_features(data['features'])
gdf.crs = {'init':'epsg:{}'.format(epsg_code)}
gdf = gdf.to_crs(epsg=epsg_code)
os.remove(out_vector_file)
return gdf
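# Build an RGB composite with Orfeo Toolbox: BandMathX evaluates one expression
# per channel, then Convert rescales the temporary result into the output product.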
def create_composite(input_products, output_product, band_expressions):
BandMathX = otbApplication.Registry.CreateApplication("BandMathX")
BandMathX.SetParameterStringList('il', input_products)
BandMathX.SetParameterString('out', 'temp_red_green_blue.tif')
BandMathX.SetParameterString('exp', ';'.join(band_expressions))
BandMathX.ExecuteAndWriteOutput()
Convert = otbApplication.Registry.CreateApplication('Convert')
Convert.SetParameterString('in', 'temp_red_green_blue.tif')
Convert.SetParameterString('out', output_product)
Convert.SetParameterString('type', 'linear')
Convert.SetParameterString('channels', 'rgb')
Convert.ExecuteAndWriteOutput()
os.remove('temp_red_green_blue.tif')
return output_product
def create_mask(in_composite, out_mask):
#gdal_calc.py --calc="logical_and(logical_and(A==255, B==0), C==0)" -A $1 --A_band=1 -B $1 --B_band=2 -C $1 --C_band=3 --outfile=${1::-8}.mask.tif
calc_exp="logical_and(logical_and(A==255, B==0), C==0)"
gdalCalc(calc=calc_exp, A=in_composite, A_band=1, B=in_composite, B_band=2, C=in_composite, C_band=3, outfile=out_mask)
def create_rbb(in_rgb, out_rbb):
#gdal_translate -ot UInt16 -a_nodata 256 ${1::-14}RED-BLUE.rgb.tif ${1::-8}.acd.tif -co COMPRESS=LZW -b 1 -b 3 -b 3
translate_options = gdal.TranslateOptions(gdal.ParseCommandLine('-co COMPRESS=LZW '\
'-ot UInt16 ' \
'-a_nodata 256 ' \
'-b 1 -b 3 -b 3 '))
ds = gdal.Open(in_rgb, gdal.OF_READONLY)
gdal.Translate(out_rbb,
ds,
options=translate_options) |
from django.contrib import admin
from .models import Entry
def publish_selected(modeladmin, request, queryset):
queryset.update(is_published=True)
publish_selected.short_description = "Publish the selected posts"
@admin.register(Entry)
class EntryAdmin(admin.ModelAdmin):
list_display = ("pub_date", "title", "category", "is_featured", "is_published")
actions = [publish_selected]
ordering = ("-pub_date",)
|
# Generated by Django 3.0.12 on 2021-03-01 11:06
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title', models.CharField(max_length=250, verbose_name='Title')),
('description', models.TextField(blank=True, verbose_name='Description')),
('date', models.DateTimeField(verbose_name='Date')),
('tag', models.CharField(blank=True, default='none, ', max_length=100, verbose_name='Tag')),
],
options={
'ordering': ['title'],
},
),
]
|
__all__ = ["Brain", "Entity", "STT", "TTS"]
|
# import statements
import cv2
import os
import sqlite3
from tkinter import *
'''
Ensure that the folder used to store the training images exists.
'''
def assure_folder_exists(folder):
directory = os.path.dirname(folder)
if not os.path.exists(directory):
os.makedirs(directory)
# Variables
news_sticky = N + E + W + S
bg_color = "#ADD8E6"
fg_color = "black"
config_color = '#A4CCD0'
col_num = 1
# UI Elements
main_frame = Tk()
main_frame.title("Facial Recognition System")
main_frame.configure(bg=config_color)
# Labels
var_1 = StringVar()
l_1 = Label(main_frame, textvariable=var_1, bg=bg_color, fg=fg_color, relief=RAISED)
var_1.set("Person ID:")
l_1.grid(row=2, column=col_num, sticky=news_sticky)
var_2 = StringVar()
l_2 = Label(main_frame, textvariable=var_2, bg=bg_color, fg=fg_color, relief=RAISED)
var_2.set("Name:")
l_2.grid(row=3, column=col_num, sticky=news_sticky)
var_3 = StringVar()
l_3 = Label(main_frame, textvariable=var_3, bg=bg_color, fg=fg_color, relief=RAISED)
var_3.set("Date of Birth:")
l_3.grid(row=4, column=col_num, sticky=news_sticky)
var_4 = StringVar()
l_4 = Label(main_frame, textvariable=var_4, bg=bg_color, fg=fg_color, relief=RAISED)
var_4.set("Email:")
l_4.grid(row=5, column=col_num, sticky=news_sticky)
var_5 = StringVar()
l_5 = Label(main_frame, textvariable=var_5, bg=bg_color, fg=fg_color, relief=RAISED)
var_5.set("Address:")
l_5.grid(row=6, column=col_num, sticky=news_sticky)
# Inputs
e1_val = StringVar()
e_1 = Entry(main_frame, textvariable=e1_val, bg=bg_color, fg=fg_color)
e_1.grid(row=2, column=col_num + 1)
e2_val = StringVar()
e_2 = Entry(main_frame, textvariable=e2_val, bg=bg_color, fg=fg_color)
e_2.grid(row=3, column=col_num + 1)
e3_val = StringVar()
e_3 = Entry(main_frame, textvariable=e3_val, bg=bg_color, fg=fg_color)
e_3.grid(row=4, column=col_num + 1)
e4_val = StringVar()
e_4 = Entry(main_frame, textvariable=e4_val, bg=bg_color, fg=fg_color)
e_4.grid(row=5, column=col_num + 1)
e5_val = StringVar()
e_5 = Entry(main_frame, textvariable=e5_val, bg=bg_color, fg=fg_color)
e_5.grid(row=6, column=col_num + 1)
# UI functions
def pass_inputs():
return e1_val.get(), e2_val.get(), e3_val.get(), e4_val.get(), e5_val.get()
def complete_information_gathering():
main_frame.destroy()
# Buttons
b1 = Button(main_frame, text="OK", bg=bg_color, fg=fg_color, command=complete_information_gathering)
b1.grid(row=10, column=col_num+1)
main_frame.mainloop()
# End of UI functions
'''
From here on the script runs procedurally: it stores the entered person details in SQLite, then captures face images from the webcam for training.
'''
# Collect the person details entered in the UI
person_id, name, dob, email, address = pass_inputs()
parameters = (int(person_id), name, dob, email, address)
#######
sql_create = '''CREATE TABLE IF NOT EXISTS Persons
(ID INT PRIMARY KEY NOT NULL,
NAME TEXT NOT NULL,
DOB TEXT NOT NULL,
EMAIL CHAR(50),
ADDRESS TEXT CHAR(50));'''
sql_select = "SELECT * from Persons"
connection = sqlite3.connect('database/persons.db')
connection.cursor()
connection.execute(sql_create)
sql = "INSERT INTO Persons (ID,NAME,DOB,EMAIL,ADDRESS) VALUES (?, ?, ?, ?,?)"
connection.execute(sql, parameters)
connection.execute(sql_select)
connection.commit()
connection.close()
# Identifiers used when naming the captured training images
face_id = int(person_id)
name_id = name
# Video - webcam start
web_cam = cv2.VideoCapture(0)
# Frontal-face detector (OpenCV Haar cascade)
init_face_crop = cv2.CascadeClassifier('dir_util/haarcascade_frontalface_default.xml')
# Face Count
num_of_faces = 0
assure_folder_exists("images/")
font = cv2.FONT_HERSHEY_SIMPLEX
# Obfuscated on-screen instruction text; decrypt() shifts each character back by one
# to recover "Rotate your face Clockwise"
enc = 'Spubuf!zpvs!gbdf!Dmpdlxjtf'
def decrypt(kj):
fr = []
for i in kj:
fr.append(chr(ord(i)-1))
return "".join(fr)
# Capture webcam frames until enough face samples have been recorded
while True:
# Analyse Web cam video feed
_, single_image = web_cam.read()
    # Convert the frame to grayscale
remove_color_channel = cv2.cvtColor(single_image, cv2.COLOR_BGR2GRAY)
# Detect number of faces in the image
list_of_faces = init_face_crop.detectMultiScale(remove_color_channel, 1.3, 5)
# for each face in list_of_faces
for (x, y, w, h) in list_of_faces:
        # Draw a bounding box around the detected face
cv2.rectangle(single_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
# label text
cv2.putText(single_image, decrypt(enc), (x, y - 10), font, 0.5, (120, 255, 120), 2, 1)
# num of faces
num_of_faces += 1
if num_of_faces == 1:
            # save the first detected face crop as a training image
cv2.imwrite("images/" + name_id +"_" + str(face_id) +".jpg", remove_color_channel[y:y + h, x:x + w])
    # Display the frame with bounding boxes
cv2.imshow('frame', single_image)
    # Stop capturing if the user presses 'q'
if cv2.waitKey(1) & 0xFF == ord('q'):
break
elif num_of_faces >= 30:
print("Successfully Captured")
break
# Webcam feed ended
web_cam.release()
# Close windows
cv2.destroyAllWindows()
|
from MDSplus import Device, Event, VECTOR, Uint8Array
import subprocess
import numpy as np
import time
import traceback
import os
MC = __import__('MARTE2_COMPONENT', globals())
class MARTE2_SUPERVISOR(Device):
"""National Instrument 6683 device. Generation of clock and triggers and recording of events """
parts = [{'path': ':NAME', 'type': 'text'}, {'path': ':COMMENT',
'type': 'text'}, {'path': ':NUM_STATES', 'type': 'numeric'}]
for stateIdx in range(10):
parts.append({'path': '.STATE_'+str(stateIdx+1), 'type': 'structure'})
parts.append(
{'path': '.STATE_'+str(stateIdx+1)+':NAME', 'type': 'text'})
parts.append({'path': '.STATE_'+str(stateIdx+1) +
':NUM_THREADS', 'type': 'numeric'})
for threadIdx in range(10):
parts.append({'path': '.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1), 'type': 'structure'})
parts.append({'path': '.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':NAME', 'type': 'text'})
parts.append({'path': '.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':CORE', 'type': 'numeric'})
parts.append({'path': '.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAMS', 'type': 'numeric'})
parts.append({'path': '.TIMES', 'type': 'structure'})
for stateIdx in range(10):
parts.append({'path': '.TIMES.STATE_' +
str(stateIdx+1), 'type': 'structure'})
for threadIdx in range(10):
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1), 'type': 'structure'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1)+'.THREAD_' +
str(threadIdx+1)+':SEG_LEN', 'type': 'numeric', 'value': 0})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1)+'.THREAD_' +
str(threadIdx+1)+':CPU_MASK', 'type': 'numeric', 'value': 15})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':CYCLE', 'type': 'signal'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAM1', 'type': 'signal'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAM2', 'type': 'signal'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAM3', 'type': 'signal'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAM4', 'type': 'signal'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAM5', 'type': 'signal'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAM6', 'type': 'signal'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAM7', 'type': 'signal'})
parts.append({'path': '.TIMES.STATE_'+str(stateIdx+1) +
'.THREAD_'+str(threadIdx+1)+':GAM8', 'type': 'signal'})
parts.append({'path': ':MARTE_CONFIG', 'type': 'numeric'})
parts.append({'path': ':INIT', 'type': 'action',
'valueExpr': "Action(Dispatch('MARTE_SERVER','INIT',50,None),Method(None,'startMarteIdle',head))",
'options': ('no_write_shot',)})
parts.append({'path': ':GOTORUN', 'type': 'action',
'valueExpr': "Action(Dispatch('MARTE_SERVER','PON',20,None),Method(None,'gotorun',head))",
'options': ('no_write_shot',)})
parts.append({'path': ':STOP', 'type': 'action',
'valueExpr': "Action(Dispatch('MARTE_SERVER','POST_PULSE_CHECK',50,None),Method(None,'stopMarte',head))",
'options': ('no_write_shot',)})
MODE_GAM = 1
MODE_INPUT = 2
MODE_SYNCH_INPUT = 3
MODE_OUTPUT = 4
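    # Return the GAM device nodes configured for a given (state, thread); the GAMS
    # field may hold either a VECTOR of node references or an array of path strings.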
def getGamList(self, state, thread):
t = self.getTree()
gams = getattr(self, 'state_%d_thread_%d_gams' %
(state+1, thread+1)).getData()
gamNids = []
if isinstance(gams, VECTOR):
for i in range(gams.getNumDescs()):
currGamNid = gams.getDescAt(i)
gamNids.append(currGamNid)
else:
for gam1 in gams.data():
if isinstance(gam1, str):
gam = gam1
else:
gam = str(gam1, 'utf_8')
currGamNid = t.getNode(gam)
gamNids.append(currGamNid)
print(gamNids)
return gamNids
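    # Collect the per-state/per-thread GAM information and the generated MARTe2
    # configuration fragments; returns (error, info, threadMap, typeDicts).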
def getInfo(self):
try:
error = ''
info = {}
t = self.getTree()
numStates = self.num_states.data()
statesInfo = []
retData = []
retGams = []
threadMap = {}
typeDicts = []
# first iteration to get threadMap
for state in range(numStates):
numThreads = getattr(
self, 'state_%d_num_threads' % (state+1)).data()
for thread in range(numThreads):
threadName = getattr(
self, 'state_%d_thread_%d_name' % (state+1, thread+1)).data()
try:
gamNodes = self.getGamList(state, thread)
except:
raise Exception(
'Cannot get GAM list for state: ' + str(state + 1) + ', thread: '+str(thread + 1))
for currGamNode in gamNodes:
nid = currGamNode.getNid()
if nid in threadMap:
threadMap[nid] += [threadName]
else:
threadMap[nid] = [threadName]
# Second iteration, build the remaining
for state in range(numStates):
stateInfo = {}
stateInfo['name'] = getattr(
self, 'state_%d_name' % (state+1)).data()
numThreads = getattr(
self, 'state_%d_num_threads' % (state+1)).data()
stateThreads = []
for thread in range(numThreads):
threadInfo = {}
threadName = getattr(
self, 'state_%d_thread_%d_name' % (state+1, thread+1)).data()
try:
core = getattr(self, 'state_%d_thread_%d_core' %
(state+1, thread+1)).data()
threadInfo['core'] = core
except:
pass
threadInfo['name'] = threadName
gamNames = []
threadPeriod = 0
gamNids = []
gamNodes = self.getGamList(state, thread)
for currGamNode in gamNodes:
nid = currGamNode.getNid()
if currGamNode.isOn():
try:
gamClass = currGamNode.getData().getDevice()
gamInstance = gamClass(currGamNode)
except:
raise Exception(
'Cannot instantiate device for node '+currGamNode.getFullPath())
gamList = []
if not (currGamNode.getNid() in gamNids):
# try:
gamInstance.prepareMarteInfo()
currPeriod = gamInstance.getMarteInfo(
threadMap, retGams, retData, gamList, typeDicts)
# except:
# return 'Cannot get timebase for ' + gam, {},{}
gamNids.append(currGamNode.getNid())
# if currPeriod > 0 and threadPeriod > 0:
if currPeriod > 0 and threadPeriod > 0 and currPeriod != threadPeriod:
raise Exception('More than one component driving thread timing for state: '+str(
state+1)+', thread: '+str(thread+1))
else:
if currPeriod > 0:
threadPeriod = currPeriod
else:
dummyGams = []
dummyData = []
gamInstance.getMarteInfo(
threadMap, dummyGams, dummyData, gamList, typeDicts)
gamNames += gamList
# TIMINGS
if threadPeriod == 0:
raise Exception(
'No component driving thread timing for state: '+str(state+1)+', thread: '+str(thread+1))
gamList = []
self.getTimingInfo(
state, thread, threadPeriod, retGams, retData, gamList)
gamNames += gamList
#############################
threadInfo['gams'] = gamNames
stateThreads.append(threadInfo)
stateInfo['threads'] = stateThreads
statesInfo.append(stateInfo)
info['states'] = statesInfo
info['gams'] = retGams
info['data_sources'] = retData
info['name'] = self.getNode('name').data()
return error, info, threadMap, typeDicts
except Exception as inst:
print(traceback.format_exc())
# return inst.args[0], None, None
return str(inst), None, None, None
    # Enrich GAMs and Data Sources with what is required to store timing information (IOGAM + MDSWriter) if seg_len > 0
def getTimingInfo(self, state, thread, threadPeriod, retGams, dataSources, gamList):
segLen = getattr(self, 'times_state_%d_thread_%d_seg_len' %
(state+1, thread+1)).data()
if(segLen == 0):
return
stateName = getattr(self, 'state_%d_name' % (state+1)).data()
threadName = getattr(self, 'state_%d_thread_%d_name' %
(state+1, thread+1)).data()
cpuMask = getattr(self, 'times_state_%d_thread_%d_cpu_mask' %
(state+1, thread+1)).data()
timeSignals = []
gamNodes = self.getGamList(state, thread)
for currGamNid in gamNodes:
if currGamNid.isOn():
gamName = currGamNid.getNodeName()
gamClass = currGamNid.getData().getDevice()
gamInstance = gamClass(currGamNid)
gamMode = gamInstance.mode.data()
if gamMode == MARTE2_SUPERVISOR.MODE_GAM:
timeSignals.append(gamName+'_ReadTime')
timeSignals.append(gamName+'_ExecTime')
elif gamMode == MARTE2_SUPERVISOR.MODE_OUTPUT:
timeSignals.append(gamName+'_IOGAM_WriteTime')
else:
timeSignals.append(gamName+'_DDBOutIOGAM_ReadTime')
if len(timeSignals) == 0:
return
currGam = '+State_%d_Thread_%d_TIMES_IOGAM = {\n' % (state+1, thread+1)
currGam += ' Class = IOGAM\n'
currGam += ' InputSignals = {\n'
currGam += ' '+stateName+'_'+threadName+'_CycleTime = {\n'
currGam += ' Alias = '+stateName+'.'+threadName+'_CycleTime\n'
currGam += ' DataSource = Timings\n'
currGam += ' Type = uint32\n'
currGam += ' }\n'
for timeSignal in timeSignals:
currGam += ' '+timeSignal+' = {\n'
currGam += ' DataSource = Timings\n'
currGam += ' Type = uint32\n'
currGam += ' }\n'
currGam += ' }\n'
currGam += ' OutputSignals = {\n'
currGam += ' CycleTime = {\n'
currGam += ' DataSource = State_%d_Thread_%d_TIMES_WRITER\n' % (
state+1, thread+1)
currGam += ' Type = uint32\n'
currGam += ' }\n'
for timeSignal in timeSignals:
currGam += ' '+timeSignal+' = {\n'
currGam += ' DataSource = State_%d_Thread_%d_TIMES_WRITER\n' % (
state+1, thread+1)
currGam += ' Type = uint32\n'
currGam += ' }\n'
currGam += ' }\n'
currGam += '}\n'
retGams.append(currGam)
gamList.append('State_%d_Thread_%d_TIMES_IOGAM' % (state+1, thread+1))
dataSource = ' +State_%d_Thread_%d_TIMES_WRITER = {\n' % (
state+1, thread+1)
dataSource += ' Class = MDSWriter\n'
dataSource += ' NumberOfBuffers = 20000\n'
dataSource += ' CPUMask = ' + str(cpuMask)+'\n'
dataSource += ' StackSize = 10000000\n'
dataSource += ' TreeName = "'+self.getTree().name+'"\n'
dataSource += ' PulseNumber = '+str(self.getTree().shot)+'\n'
dataSource += ' StoreOnTrigger = 0\n'
dataSource += ' TimeRefresh = 1\n'
dataSource += ' EventName = "'+gamName+'UpdatejScope"\n'
dataSource += ' Signals = {\n'
dataSource += ' CycleTime = {\n'
dataSource += ' NodeName = "' + \
getattr(self, 'times_state_%d_thread_%d_cycle' %
(state+1, thread+1)).getFullPath()+'"\n'
dataSource += ' Period = '+str(threadPeriod) + '\n'
dataSource += ' MakeSegmentAfterNWrites = '+str(segLen)+'\n'
dataSource += ' AutomaticSegmentation = 0\n'
dataSource += ' }\n'
sigIdx = 1
for timeSignal in timeSignals:
dataSource += ' '+timeSignal + ' = {\n'
dataSource += ' NodeName = "' + \
getattr(self, 'times_state_%d_thread_%d_gam' %
(state+1, thread+1)+str(sigIdx)).getFullPath()+'"\n'
dataSource += ' Period = '+str(threadPeriod) + '\n'
dataSource += ' MakeSegmentAfterNWrites = '+str(segLen)+'\n'
dataSource += ' AutomaticSegmentation = 0\n'
dataSource += ' }\n'
sigIdx = sigIdx + 1
dataSource += ' }\n'
dataSource += ' }\n'
dataSources.append(dataSource)
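    # Emit the '+Types' ReferenceContainer declaring every registered structured
    # type as a MARTe2 IntrospectionStructure.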
def declareTypes(self, typeDicts):
if len(typeDicts) == 0:
return ''
typeDecl = '+Types = {\n'
typeDecl += ' Class = ReferenceContainer\n'
for typeDict in typeDicts:
typeDecl += ' +'+typeDict['name'] + ' = {\n'
typeDecl += ' Class = IntrospectionStructure\n'
for fieldDict in typeDict['fields']:
typeDecl += ' '+fieldDict['name'] + ' = {\n'
typeDecl += ' Type = '+fieldDict['type']+'\n'
dimensions = fieldDict['dimensions']
if dimensions == 0:
numberOfElements = 1
numberOfDimensions = 0
else:
numberOfDimensions = len(fieldDict['dimensions'])
numberOfElements = 1
for currDim in fieldDict['dimensions']:
numberOfElements *= currDim
typeDecl += ' NumberOfDimensions = ' + \
str(numberOfDimensions)+'\n'
typeDecl += ' NumberOfElements = ' + \
str(numberOfElements)+'\n'
typeDecl += ' }\n'
typeDecl += ' }\n'
typeDecl += '}\n'
return typeDecl
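    # Assemble the full MARTe2 configuration text (web server, state machine, GAMs,
    # data sources, states and scheduler), store it in MARTE_CONFIG and write it to
    # /tmp/<name>_marte_configuration.cfg.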
def buildConfiguration(self):
print('START BUILD')
error, info, threadMap, typeDicts = self.getInfo()
if error != '':
return 0
confText = self.declareTypes(typeDicts)
confText += '+MDS_EVENTS = {\n'
confText += ' Class = MDSEventManager\n'
confText += ' StackSize = 1048576\n'
confText += ' CPUs = 0x1\n'
confText += ' Name = '+info['name']+'\n'
confText += '}\n'
confText += '+WebRoot = {\n'
confText += ' Class = HttpObjectBrowser\n'
confText += ' Root = "."\n'
confText += ' +ObjectBrowse = {\n'
confText += ' Class = HttpObjectBrowser\n'
confText += ' Root = "/"\n'
confText += ' }\n'
confText += ' +ResourcesHtml = {\n'
confText += ' Class = HttpDirectoryResource\n'
confText += ' BaseDir = "/opt/MARTe2/MARTe2/Resources/HTTP/"\n'
confText += ' } \n'
confText += '}\n'
confText += '+WebServer = {\n'
confText += ' Class = HttpService\n'
confText += ' Port = 8085\n'
confText += ' WebRoot = WebRoot\n'
confText += ' Timeout = 0\n'
confText += ' ListenMaxConnections = 255\n'
confText += ' AcceptTimeout = 1000\n'
confText += ' MaxNumberOfThreads = 8\n'
confText += ' MinNumberOfThreads = 1\n'
confText += '} \n'
confText += ' +StateMachine = {\n'
confText += ' Class = StateMachine\n'
confText += ' +INITIAL = {\n'
confText += ' Class = ReferenceContainer \n'
confText += ' +START = {\n'
confText += ' Class = StateMachineEvent\n'
confText += ' NextState = "IDLE"\n'
confText += ' NextStateError = "IDLE"\n'
confText += ' Timeout = 0\n'
confText += ' +StartHttpServer = {\n'
confText += ' Class = Message\n'
confText += ' Destination = "WebServer"\n'
confText += ' Function = "Start"\n'
confText += ' } \n'
confText += ' +ChangeToStateIdleMsg = {\n'
confText += ' Class = Message\n'
confText += ' Destination = '+info['name']+'\n'
confText += ' Function = PrepareNextState\n'
confText += ' +Parameters = {\n'
confText += ' Class = ConfigurationDatabase\n'
confText += ' param1 = Idle\n'
confText += ' }\n'
confText += ' }\n'
confText += ' +StartNextStateExecutionMsg = {\n'
confText += ' Class = Message\n'
confText += ' Destination = '+info['name']+'\n'
confText += ' Function = StartNextStateExecution\n'
confText += ' }\n'
confText += ' }\n'
confText += ' }\n'
confText += ' +IDLE = {\n'
confText += ' Class = ReferenceContainer\n'
confText += ' +GOTORUN = {\n'
confText += ' Class = StateMachineEvent\n'
confText += ' NextState = "RUN"\n'
confText += ' NextStateError = "IDLE"\n'
confText += ' Timeout = 0 \n'
confText += ' +ChangeToRunMsg = {\n'
confText += ' Class = Message\n'
confText += ' Destination = '+info['name']+'\n'
confText += ' Function = PrepareNextState\n'
confText += ' +Parameters = {\n'
confText += ' Class = ConfigurationDatabase\n'
confText += ' param1 = ' + \
info['states'][0]['name']+'\n'
confText += ' }\n'
confText += ' }\n'
confText += ' +StopCurrentStateExecutionMsg = {\n'
confText += ' Class = Message\n'
confText += ' Destination = '+info['name']+'\n'
confText += ' Function = StopCurrentStateExecution\n'
confText += ' }\n'
confText += ' +StartNextStateExecutionMsg = {\n'
confText += ' Class = Message\n'
confText += ' Destination = '+info['name']+'\n'
confText += ' Function = StartNextStateExecution\n'
confText += ' }\n'
confText += ' }\n'
confText += ' }\n'
confText += ' +RUN = {\n'
confText += ' Class = ReferenceContainer\n'
confText += ' +GOTOIDLE = {\n'
confText += ' Class = StateMachineEvent\n'
confText += ' NextState = "IDLE"\n'
confText += ' NextStateError = "IDLE"\n'
confText += ' Timeout = 0 \n'
confText += ' +ChangeToIdleMsg = {\n'
confText += ' Class = Message\n'
confText += ' Destination = '+info['name']+'\n'
confText += ' Function = PrepareNextState\n'
confText += ' +Parameters = {\n'
confText += ' Class = ConfigurationDatabase\n'
confText += ' param1 = Idle\n'
confText += ' }\n'
confText += ' }\n'
confText += ' +StopCurrentStateExecutionMsg = {\n'
confText += ' Class = Message\n'
confText += ' Destination = '+info['name']+'\n'
confText += ' Function = StopCurrentStateExecution\n'
confText += ' }\n'
confText += ' +StartNextStateExecutionMsg = {\n'
confText += ' Class = Message\n'
confText += ' Destination = '+info['name']+'\n'
confText += ' Function = StartNextStateExecution\n'
confText += ' }\n'
confText += ' } \n'
confText += ' }\n'
confText += '} \n'
confText += '$'+info['name']+' = {\n'
confText += ' Class = RealTimeApplication\n'
confText += ' +Functions = {\n'
confText += ' Class = ReferenceContainer\n'
confText += ' +IDLE_MDSPLUS = {\n'
confText += ' Class = IOGAM\n'
confText += ' InputSignals = {\n'
confText += ' Counter = {\n'
confText += ' DataSource = IDLE_MDSPLUS_TIMER\n'
confText += ' Type = uint32\n'
confText += ' NumberOfElements = 1\n'
confText += ' }\n'
confText += ' Time = {\n'
confText += ' DataSource = IDLE_MDSPLUS_TIMER\n'
confText += ' Type = uint32\n'
confText += ' NumberOfElements = 1\n'
confText += ' Frequency = 10\n'
confText += ' }\n'
confText += ' }\n'
confText += ' OutputSignals = {\n'
confText += ' Counter = {\n'
confText += ' DataSource = IDLE_MDSPLUS_DDB\n'
confText += ' Type = uint32\n'
confText += ' }\n'
confText += ' Time = {\n'
confText += ' DataSource = IDLE_MDSPLUS_DDB\n'
confText += ' Type = uint32\n'
confText += ' NumberOfElements = 1\n'
confText += ' }\n'
confText += ' }\n'
confText += ' }\n'
for gam in info['gams']:
confText += gam
confText += ' }\n'
confText += ' +Data = {\n'
confText += ' Class = ReferenceContainer\n'
confText += ' +IDLE_MDSPLUS_TIMER = {\n'
confText += ' Class = LinuxTimer\n'
confText += ' SleepNature = "Busy"\n'
confText += ' Signals = {\n'
confText += ' Counter = {\n'
confText += ' Type = uint32\n'
confText += ' }\n'
confText += ' Time = {\n'
confText += ' Type = uint32\n'
confText += ' }\n'
confText += ' }\n'
confText += ' }\n'
confText += ' +IDLE_MDSPLUS_DDB = {\n'
confText += ' Class = GAMDataSource\n'
confText += ' }\n'
confText += ' +Timings = {\n'
confText += ' Class = TimingDataSource\n'
confText += ' }\n'
for dataSource in info['data_sources']:
confText += dataSource
confText += ' }\n'
confText += ' +States = {\n'
confText += ' Class = ReferenceContainer\n'
confText += ' +Idle = {\n'
confText += ' Class = RealTimeState\n'
confText += ' +Threads = {\n'
confText += ' Class = ReferenceContainer\n'
confText += ' +Thread1 = {\n'
confText += ' Class = RealTimeThread\n'
confText += ' Functions = {IDLE_MDSPLUS}\n'
confText += ' }\n'
confText += ' }\n'
confText += ' }\n'
for state in info['states']:
confText += ' +'+state['name'] + ' = {\n'
confText += ' Class = RealTimeState\n'
confText += ' +Threads = {\n'
confText += ' Class = ReferenceContainer\n'
for thread in state['threads']:
confText += ' +'+thread['name']+' = {\n'
confText += ' Class = RealTimeThread\n'
if 'core' in thread:
confText += ' CPUs = '+str(thread['core'])+'\n'
functionStr = ''
for gamName in thread['gams']:
functionStr += gamName + ' '
confText += ' Functions = {'+functionStr+'}\n'
confText += ' }\n'
confText += ' }\n'
confText += ' }\n'
confText += ' }\n'
confText += ' +Scheduler = {\n'
confText += ' Class = GAMScheduler\n'
confText += ' TimingDataSource = Timings\n'
confText += ' }\n'
confText += '}\n'
print (confText)
try:
os.system('mv /tmp/'+info['name']+'_marte_configuration.cfg '+'/tmp/'+info['name']+'_marte_configuration_OLD.cfg ')
except:
pass
f = open('/tmp/'+info['name']+'_marte_configuration.cfg', 'w')
self.marte_config.putData(Uint8Array(bytearray(confText.encode())))
f.write(confText)
f.close()
print('END BUILD')
def startMarteIdle(self):
self.buildConfiguration()
subprocess.Popen(['$MARTE_DIR/Playground.sh -f /tmp/'+self.getNode(
'name').data()+'_marte_configuration.cfg -m StateMachine:START'], shell=True)
def startMarte(self):
self.buildConfiguration()
stateName = self.state_1_name.data()
subprocess.Popen(['$MARTE_DIR/Playground.sh -f /tmp/'+self.getNode(
'name').data()+'_marte_configuration.cfg -m StateMachine:START '+stateName], shell=True)
time.sleep(2)
self.gotorun()
def gotorun(self):
marteName = self.getNode('name').data()
eventString1 = 'StateMachine:GOTORUN'
Event.seteventRaw(marteName, np.frombuffer(
eventString1.encode(), dtype=np.uint8))
def gotoidle(self):
marteName = self.getNode('name').data()
eventString1 = 'StateMachine:GOTOIDLE'
Event.seteventRaw(marteName, np.frombuffer(
eventString1.encode(), dtype=np.uint8))
def doState(self, state):
marteName = self.getNode('name').data()
stateName = getattr(self, 'state_%d_name' % (state)).data()
eventString1 = marteName+':StopCurrentStateExecution:XX'
eventString2 = marteName+':'+'PrepareNextState:'+stateName
eventString3 = marteName+':StartNextStateExecution:XX'
Event.seteventRaw(marteName, np.frombuffer(
eventString1.encode(), dtype=np.uint8))
time.sleep(.1)
Event.seteventRaw(marteName, np.frombuffer(
eventString2.encode(), dtype=np.uint8))
time.sleep(.1)
Event.seteventRaw(marteName, np.frombuffer(
eventString3.encode(), dtype=np.uint8))
def doState1(self):
self.doState(1)
def doState2(self):
self.doState(2)
def doState3(self):
self.doState(3)
def doState4(self):
self.doState(4)
def doState5(self):
self.doState(5)
def suspendMarte(self):
marteName = self.getNode('name').data()
eventString1 = marteName+':StopCurrentStateExecution:XX'
eventString2 = marteName+':'+'PrepareNextState:IDLE'
eventString3 = marteName+':StartNextStateExecution:XX'
Event.seteventRaw(marteName, np.frombuffer(
eventString1.encode(), dtype=np.uint8))
time.sleep(0.1)
Event.seteventRaw(marteName, np.frombuffer(
eventString2.encode(), dtype=np.uint8))
time.sleep(0.1)
Event.seteventRaw(marteName, np.frombuffer(
eventString3.encode(), dtype=np.uint8))
def stopMarte(self):
marteName = self.getNode('name').data()
self.suspendMarte()
time.sleep(2)
Event.seteventRaw(marteName, np.frombuffer(b'EXIT', dtype=np.uint8))
time.sleep(2)
Event.seteventRaw(marteName, np.frombuffer(b'EXIT', dtype=np.uint8))
        # Kill the MARTe process (subprocess and os are already imported at module level)
command = 'ps -Af | grep %s_marte_configuration.cfg | grep MARTeApp.ex | grep -v grep | awk \'{print $2}\'' % (
marteName)
pid, error = subprocess.Popen("{cmd}".format(
cmd=command), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if len(pid) == 0:
if len(error) != 0:
print('INFO : %s' % (error))
else:
for p in pid.split():
os.kill(int(p), 9)
print('MARTe Process PID : %s Killed\n' % (p))
return 1
def check(self):
t = self.getTree()
numStates = self.num_states.data()
gamInstances = []
for state in range(numStates):
numThreads = getattr(
self, 'state_%d_num_threads' % (state+1)).data()
for thread in range(numThreads):
try:
gamNids = self.getGamList(state, thread)
for currGamNid in gamNids:
if currGamNid.isOn():
gamClass = currGamNid.getData().getDevice()
gamInstance = gamClass(currGamNid)
gamInstances.append(gamInstance)
except:
                    return 'Cannot get Device list for thread '+str(thread)+' of state '+str(state)
for gamInstance in gamInstances:
try:
gamInstance.prepareMarteInfo()
except:
return 'Device ' + gamInstance.getPath() + ' is not a MARTe2 device'
error, info, threadMap, typeDicts = self.getInfo()
if error != '':
return error
for gamInstance in gamInstances:
status = gamInstance.check(threadMap)
if status != '':
return gamInstance.getPath()+': ' + status
# Check timebases
for state in range(numStates):
numThreads = getattr(
self, 'state_%d_num_threads' % (state+1)).data()
for thread in range(numThreads):
timebaseGenerator = ''
gamNodes = self.getGamList(state, thread)
for currGamNid in gamNodes:
if currGamNid.isOn():
gamClass = currGamNid.getData().getDevice()
gamInstance = gamClass(currGamNid)
timebaseMode = gamInstance.checkTimebase(threadMap)
if timebaseMode == MC.MARTE2_COMPONENT.TIMEBASE_GENERATOR:
if timebaseGenerator == '':
timebaseGenerator = gamInstance.name.data()
else:
return 'Multiple timebases in state %d, thread %d' % (state+1, thread+1)+': ' + timebaseGenerator + ', ' + gamInstance.name.data()
if timebaseGenerator == '':
return 'No Timebase defined in state %d, thread %d' % (state+1, thread+1)
return 'Configuration OK'
|
import io
import logging
from random import sample
from typing import List
import matplotlib.pyplot as plt
from django.contrib import messages
from django.contrib.auth import authenticate, logout, login, update_session_auth_hash
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.template import loader
from django.views.generic import DetailView
from matplotlib.backends.backend_agg import FigureCanvasAgg
from core.models import Curso, ComponenteCurricular, EstruturaCurricular, SugestaoTurma, Sala, Docente, Turma, \
SolicitacaoTurma
from core.visoes.flow_view import flow_horizontal, flow_opcionais
from .bo.curso import get_cursos
from .bo.discentes import get_discentes, get_discentes_ativos
from .bo.docente import get_docentes
from .bo.sala import get_salas
from .bo.sevices import get_oc_by_semestre, get_ch_by_semestre, get_estrutura_direito, get_estrutura_matematica, \
get_estrutura_pedagogia
from .bo.sistemas import get_estrutura_sistemas, get_estrutura_sistemas_dct
from .dao.centro_dao import get_ceres
from .dao.componente_dao import get_componentes_by_depto, get_componentes_curriculares
from .dao.departamento_dao import get_departamentos
from .filters import SalaFilter, DocenteFilter
from .forms import CadastroUsuarioForm
from .models import Horario
from .visoes.suggest_view import sugestao_grade_horarios, sugestao_manter, sugestao_incluir, sugestao_editar, \
redirecionar, sugestao_deletar, atualizar_solicitacao
from .visoes.turma_view import turmas_grade
from .visoes.user_view import criar_usuario, autenticar_logar
logger = logging.getLogger('suggestclasses.logger')
def index(request):
"""
    View for the Home page (initial screen).
    :param request: An HTTP request.
    :return: A response with data about CERES/UFRN.
"""
ceres = get_ceres()
departamentos = get_departamentos()
cursos = get_cursos()
componentes = get_componentes_curriculares()
docentes = get_docentes()
discentes = get_discentes()
discentes_ativos = get_discentes_ativos()
context = {
'ceres': ceres,
'departamentos': departamentos,
'docentes': docentes,
'cursos': cursos,
'componentes': componentes,
'discentes': discentes,
'discentes_ativos': discentes_ativos,
}
return render(request, 'core/home.html', context)
def sobre(request):
ceres = get_ceres()
context = {
'ceres': ceres,
}
return render(request, 'core/sobre.html', context)
def dashboard(request):
"""
    Index view for the Dashboard.
    :param request: An HTTP request.
    :return: An HttpResponse.
"""
ceres = get_ceres()
departamentos = get_departamentos()
estruturas = EstruturaCurricular.objects.all()
cursos = Curso.objects.all()
componentes = get_componentes_curriculares()
componentes_by_depto = []
headers: List[str] = []
for d in departamentos:
headers.append(d.sigla)
componentes_by_depto.append(get_componentes_by_depto(d))
template = loader.get_template('core/dashboard.html')
context = {
'ceres': ceres,
'departamentos': departamentos,
'cursos': cursos,
'componentes': componentes,
'estruturas': estruturas,
'headers': headers,
'componentes_by_depto': componentes_by_depto,
}
return HttpResponse(template.render(context, request))
def detail(request, horario_id):
return HttpResponse("You're looking at Horario %s." % horario_id)
def curso_detail(request, curso_id):
curso = Curso.objects.get(pk=curso_id)
return HttpResponse("You're looking at Curso %s." % curso)
def horarios_list(request):
horario_list = Horario.objects.all()
horarios = []
for i in range(1, 7):
horarios.append(Horario.objects.filter(ordem=i, turno='M').order_by('dia'))
for i in range(1, 7):
horarios.append(Horario.objects.filter(ordem=i, turno='T').order_by('dia'))
for i in range(1, 5):
horarios.append(Horario.objects.filter(ordem=i, turno='N').order_by('dia'))
context = {
'horario_list': horario_list,
'horarios': horarios
}
return render(request, 'core/list.html', context)
def departamento_list(request):
"""
    List all departments.
"""
departamentos = get_departamentos()
context = {
'departamentos': departamentos
}
return render(request, 'core/departamento/list.html', context)
def curso_list(request):
"""
    List all courses.
"""
cursos = Curso.objects.all()
context = {
'cursos': cursos
}
return render(request, 'core/curso/list.html', context)
def componente_list(request):
"""
    List all curricular components.
"""
componentes = ComponenteCurricular.objects.all()
context = {
'componentes': componentes
}
return render(request, 'core/componente/list.html', context)
class ComponenteDetailView(DetailView):
model = ComponenteCurricular
template_name = 'core/componente/detalhar.html'
class DocenteDetailView(DetailView):
model = Docente
template_name = 'core/docente/detalhar.html'
def curriculo_list(request):
estruturas = EstruturaCurricular.objects.all()
context = {
'estruturas': estruturas
}
return render(request, 'core/curriculo/list.html', context)
def docentes_list(request):
"""
    List all lecturers of the centre.
"""
docentes = get_docentes()
docente_filter = DocenteFilter(request.GET, queryset=docentes)
context = {
'filter': docente_filter
}
return render(request, 'core/docente/list.html', context)
def sala_list(request):
"""
    List all rooms of the centre.
"""
salas = Sala.objects.all()
context = {
'salas': salas
}
return render(request, 'core/sala/list.html', context)
def flow_list(request):
"""
    List all curricular structures of the CERES centre.
"""
bsi_flow_1a = get_estrutura_sistemas()
bsi_flow_1b = get_estrutura_sistemas_dct()
ped_flow = get_estrutura_pedagogia()
mat_flow = get_estrutura_matematica()
dir_flow = get_estrutura_direito()
context = {
'dir_flow': dir_flow,
'mat_flow': mat_flow,
'ped_flow': ped_flow,
'bsi_flow_1a': bsi_flow_1a,
'bsi_flow_1b': bsi_flow_1b
}
return render(request, 'core/flow/list.html', context)
def flow_bsi(request):
bsi_ec = get_estrutura_sistemas()
bsi_oc_semestres = []
bsi_ch_semestres = []
bsi_oc_op = get_oc_by_semestre(bsi_ec, 0)
headers: List[str] = []
for s in range(1, 9):
headers.append(f"{s}º Semestre")
bsi_oc_semestres.append(get_oc_by_semestre(bsi_ec, s))
bsi_ch_semestres.append(get_ch_by_semestre(bsi_ec, s))
context = {
'bsi_ec': bsi_ec,
'headers': headers,
'bsi_oc_semestres': bsi_oc_semestres,
'bsi_oc_op': bsi_oc_op,
'bsi_ch_semestres': bsi_ch_semestres,
}
return render(request, 'core/flow/bsi.html', context)
def flow_bsi_1b(request):
bsi_ec = get_estrutura_sistemas_dct()
bsi_oc_semestres = []
bsi_ch_semestres = []
bsi_oc_op = get_oc_by_semestre(bsi_ec, 0)
headers: List[str] = []
bsi_tam = []
bsi_oc_max = 0
for s in range(1, 9):
oc = get_oc_by_semestre(bsi_ec, s)
ch = get_ch_by_semestre(bsi_ec, s)
headers.append(f"{s}º Semestre")
tam = len(oc)
bsi_tam.append(tam)
bsi_oc_semestres.append(oc)
bsi_ch_semestres.append(ch)
if tam >= bsi_oc_max:
bsi_oc_max = tam
for i in range(0, len(bsi_tam)):
bsi_tam[i] = bsi_oc_max - bsi_tam[i]
context = {
'bsi_ec': bsi_ec,
'headers': headers,
'bsi_oc_semestres': bsi_oc_semestres,
'bsi_oc_op': bsi_oc_op,
'bsi_tam': bsi_tam,
'bsi_oc_max': bsi_oc_max,
'bsi_ch_semestres': bsi_ch_semestres,
}
return render(request, 'core/flow/bsi-1b.html', context)
def flow_bsi_1b_h(request):
bsi_ec = get_estrutura_sistemas_dct()
link_opcionais = '/core/flow/bsi/opcionais'
return flow_horizontal(request, bsi_ec, link_opcionais)
def flow_bsi_op(request):
bsi_ec = get_estrutura_sistemas_dct()
return flow_opcionais(request, bsi_ec)
def flow_dir(request):
dir_ec = get_estrutura_direito()
link_opcionais = '/core/flow/dir/opcionais'
return flow_horizontal(request, dir_ec, link_opcionais)
def flow_dir_op(request):
dir_ec = get_estrutura_direito()
return flow_opcionais(request, dir_ec)
def flow_mat_h(request):
mat_ec = get_estrutura_matematica()
link_opcionais = '/core/flow/mat/opcionais'
return flow_horizontal(request, mat_ec, link_opcionais)
def flow_mat_op(request):
mat_ec = get_estrutura_matematica()
return flow_opcionais(request, mat_ec)
def flow_ped_h(request):
ped_ec = get_estrutura_pedagogia()
link_opcionais = '/core/flow/ped/opcionais'
return flow_horizontal(request, ped_ec, link_opcionais)
def flow_ped_op(request):
ped_ec = get_estrutura_pedagogia()
return flow_opcionais(request, ped_ec)
def cadastrar_usuario(request):
if request.method == "POST":
form_usuario = CadastroUsuarioForm(request.POST)
if form_usuario.is_valid():
try:
criar_usuario(request, form_usuario)
autenticar_logar(request, form_usuario)
messages.success(request, 'Usuário cadastrado com sucesso.')
return redirect('index')
except ValidationError as e:
form_usuario.add_error(None, e)
else:
messages.error(request, 'O formulário contém dados inválidos!')
else:
form_usuario = CadastroUsuarioForm()
return render(request, 'core/usuario/cadastro.html', {'form_usuario': form_usuario})
def logar_usuario(request):
if request.method == "POST":
username = request.POST["username"]
password = request.POST["password"]
usuario = authenticate(request, username=username, password=password)
if usuario is not None:
login(request, usuario)
messages.success(request, 'Usuário logado com sucesso.')
return redirect('index')
else:
messages.error(request, 'Erro ao logar usuário.')
form_login = AuthenticationForm()
else:
form_login = AuthenticationForm()
return render(request, 'core/usuario/login.html', {'form_login': form_login})
@login_required(login_url='/core/usuario/logar')
def deslogar_usuario(request):
logout(request)
messages.success(request, 'Usuário deslogado com sucesso.')
return redirect('index')
@login_required(login_url='/core/usuario/logar')
def alterar_senha(request):
if request.method == "POST":
form_senha = PasswordChangeForm(request.user, request.POST)
if form_senha.is_valid():
user = form_senha.save()
update_session_auth_hash(request, user)
messages.success(request, 'Usuário atualizado com sucesso.')
return redirect('index')
else:
form_senha = PasswordChangeForm(request.user)
return render(request, 'core/usuario/alterar_senha.html', {'form_senha': form_senha})
def turmas_list(request):
"""
    List all classes (Turmas) of the CERES centre.
"""
dir_flow = get_estrutura_direito()
bsi_flow = get_estrutura_sistemas_dct()
ped_flow = get_estrutura_pedagogia()
mat_flow = get_estrutura_matematica()
context = {
'dir_flow': dir_flow,
'mat_flow': mat_flow,
'ped_flow': ped_flow,
'bsi_flow': bsi_flow
}
return render(request, 'core/turmas/list.html', context)
class TurmaDetailView(DetailView):
model = Turma
template_name = 'core/turmas/detalhar.html'
def turmas_dir(request):
dir_ddir = get_estrutura_direito()
turmas_list_link = '/core/turmas/dir'
return turmas_grade(request, dir_ddir, turmas_list_link)
def turmas_mat(request):
mat_dcea = get_estrutura_matematica()
turmas_list_link = '/core/turmas/mat'
return turmas_grade(request, mat_dcea, turmas_list_link)
def turmas_bsi(request):
bsi_dct = get_estrutura_sistemas_dct()
turmas_list_link = '/core/turmas/bsi'
return turmas_grade(request, bsi_dct, turmas_list_link)
def turmas_ped(request):
ped_deduc = get_estrutura_pedagogia()
turmas_list_link = '/core/turmas/ped'
return turmas_grade(request, ped_deduc, turmas_list_link)
def sugestao_list(request):
"""
    Page listing the courses, with the option to register class suggestions (Sugestões de Turma).
"""
dir_flow = get_estrutura_direito()
bsi_flow = get_estrutura_sistemas_dct()
ped_flow = get_estrutura_pedagogia()
mat_flow = get_estrutura_matematica()
context = {
'dir_flow': dir_flow,
'mat_flow': mat_flow,
'ped_flow': ped_flow,
'bsi_flow': bsi_flow
}
return render(request, 'core/sugestao/list.html', context)
class SugestaoTurmaDetailView(DetailView):
model = SugestaoTurma
template_name = 'core/sugestao/detalhar.html'
def sugestao_solicitar(request, pk):
return atualizar_solicitacao(request, pk)
def solicitacao_turma_listar(request, pk):
turma = SugestaoTurma.objects.get(pk=pk)
solicitacoes = SolicitacaoTurma.objects.filter(turma=turma).order_by('criada_em', 'solicitador__nome_curso')
context = {
'turma': turma,
'solicitacoes': solicitacoes,
}
return render(request, 'core/sugestao/solicitacao_listar.html', context)
def sugestao_dir_list(request):
dir_ddir = get_estrutura_direito()
sugestao_incluir_link = '/core/sugestao/dir/incluir'
sugestao_manter_link = '/core/sugestao/dir/manter'
sugestao_list_link = '/core/sugestao/dir/list'
return sugestao_grade_horarios(request, dir_ddir, sugestao_incluir_link, sugestao_manter_link, sugestao_list_link)
@permission_required("core.change_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_dir_manter(request):
"""
    Page for managing class suggestions for the Law (Direito) course.
"""
dir_ddir = get_estrutura_direito()
sugestao_incluir_link = '/core/sugestao/dir/incluir'
sugestao_editar_link = 'sugestao_dir_editar'
sugestao_deletar_link = 'sugestao_dir_deletar'
sugestao_grade_link = '/core/sugestao/dir/list'
return sugestao_manter(request, dir_ddir, sugestao_incluir_link, sugestao_grade_link,
sugestao_editar_link, sugestao_deletar_link)
@permission_required("core.add_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_dir_incluir(request):
dir_ddir = get_estrutura_direito()
sugestao_manter_link = '/core/sugestao/dir/manter'
return sugestao_incluir(request, dir_ddir, sugestao_manter_link)
@permission_required("core.change_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_dir_editar(request, pk):
dir_ddir = get_estrutura_direito()
return sugestao_editar(request, pk, estrutura=dir_ddir)
@permission_required("core.delete_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_dir_deletar(request, pk):
dir_ddir = get_estrutura_direito()
return sugestao_deletar(request, pk, estrutura=dir_ddir)
@permission_required("core.change_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_mat_manter(request):
"""
    Page for managing class suggestions for the Mathematics (Matemática) course.
"""
mat_dcea = get_estrutura_matematica()
sugestao_incluir_link = '/core/sugestao/mat/incluir'
sugestao_editar_link = 'sugestao_mat_editar'
sugestao_deletar_link = 'sugestao_mat_deletar'
sugestao_grade_link = '/core/sugestao/mat/list'
return sugestao_manter(request, mat_dcea, sugestao_incluir_link, sugestao_grade_link,
sugestao_editar_link, sugestao_deletar_link)
@permission_required("core.add_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_mat_incluir(request):
mat_dcea = get_estrutura_matematica()
sugestao_manter_link = '/core/sugestao/mat/manter'
return sugestao_incluir(request, mat_dcea, sugestao_manter_link)
@permission_required("core.change_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_mat_editar(request, pk):
mat_dcea = get_estrutura_matematica()
return sugestao_editar(request, pk, estrutura=mat_dcea)
@permission_required("core.delete_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_mat_deletar(request, pk):
mat_dcea = get_estrutura_matematica()
return sugestao_deletar(request, pk, estrutura=mat_dcea)
def sugestao_mat_list(request):
mat_dcea = get_estrutura_matematica()
sugestao_incluir_link = '/core/sugestao/mat/incluir'
sugestao_manter_link = '/core/sugestao/mat/manter'
sugestao_list_link = '/core/sugestao/mat/list'
return sugestao_grade_horarios(request, mat_dcea, sugestao_incluir_link, sugestao_manter_link, sugestao_list_link)
@permission_required("core.change_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_bsi_manter(request):
"""
    Page for managing class suggestions for the Information Systems (Sistemas de Informação) course.
"""
bsi_dct = get_estrutura_sistemas_dct()
sugestao_incluir_link = '/core/sugestao/bsi/incluir'
sugestao_editar_link = 'sugestao_bsi_editar'
sugestao_deletar_link = 'sugestao_bsi_deletar'
sugestao_grade_link = '/core/sugestao/bsi/list'
return sugestao_manter(request, bsi_dct, sugestao_incluir_link, sugestao_grade_link,
sugestao_editar_link, sugestao_deletar_link)
@permission_required("core.add_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_bsi_incluir(request):
bsi_dct = get_estrutura_sistemas_dct()
sugestao_manter_link = '/core/sugestao/bsi/manter'
return sugestao_incluir(request, bsi_dct, sugestao_manter_link)
@permission_required("core.change_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_bsi_editar(request, pk):
bsi_dct = get_estrutura_sistemas_dct()
return sugestao_editar(request, pk, estrutura=bsi_dct)
@permission_required("core.delete_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_bsi_deletar(request, pk):
bsi_dct = get_estrutura_sistemas_dct()
return sugestao_deletar(request, pk, estrutura=bsi_dct)
def sugestao_bsi_list(request):
bsi_dct = get_estrutura_sistemas_dct()
sugestao_incluir_link = '/core/sugestao/bsi/incluir'
sugestao_manter_link = '/core/sugestao/bsi/manter'
sugestao_list_link = '/core/sugestao/bsi/list'
return sugestao_grade_horarios(request, bsi_dct, sugestao_incluir_link, sugestao_manter_link, sugestao_list_link)
def sugestao_ped_list(request):
ped_deduc = get_estrutura_pedagogia()
sugestao_incluir_link = '/core/sugestao/ped/incluir'
sugestao_manter_link = '/core/sugestao/ped/manter'
sugestao_list_link = '/core/sugestao/ped/list'
return sugestao_grade_horarios(request, ped_deduc, sugestao_incluir_link, sugestao_manter_link, sugestao_list_link)
@permission_required("core.change_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_ped_manter(request):
"""
    Page for managing class suggestions for the Pedagogy (Pedagogia) course.
"""
ped_deduc = get_estrutura_pedagogia()
sugestao_incluir_link = '/core/sugestao/ped/incluir'
sugestao_editar_link = 'sugestao_ped_editar'
sugestao_deletar_link = 'sugestao_ped_deletar'
sugestao_grade_link = '/core/sugestao/ped/list'
return sugestao_manter(request, ped_deduc, sugestao_incluir_link, sugestao_grade_link,
sugestao_editar_link, sugestao_deletar_link)
@permission_required("core.add_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_ped_incluir(request):
ped_deduc = get_estrutura_pedagogia()
sugestao_manter_link = '/core/sugestao/ped/manter'
return sugestao_incluir(request, ped_deduc, sugestao_manter_link)
@permission_required("core.change_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_ped_editar(request, pk):
ped_deduc = get_estrutura_pedagogia()
return sugestao_editar(request, pk, estrutura=ped_deduc)
@permission_required("core.delete_sugestaoturma", login_url='/core/usuario/logar', raise_exception=True)
def sugestao_ped_deletar(request, pk):
ped_deduc = get_estrutura_pedagogia()
return sugestao_deletar(request, pk, estrutura=ped_deduc)
def error_403(request, exception):
logger.error('Você não tem permissão de acessar "' + request.path + '" 403 ',
exc_info=exception)
messages.error(request, 'Você não tem permissão de acessar: ' + request.path)
return redirecionar(request)
def search_salas(request):
salas = get_salas()
sala_filter = SalaFilter(request.GET, queryset=salas)
return render(request, 'core/sala/list.html', {'filter': sala_filter})
def plot(request):
    # Create the data to show in the plot
x = range(1, 11)
y = sample(range(20), len(x))
    # Create a figure and draw the plot on it
f = plt.figure()
    # Create the axes
axes = f.add_axes([0.15, 0.15, 0.75, 0.75]) # [left, bottom, width, height]
axes.plot(x, y)
axes.set_xlabel("Eje X")
axes.set_ylabel("Eje Y")
axes.set_title("Mi gráfico dinámico")
    # The image is sent as bytes, so render it into an in-memory buffer
buf = io.BytesIO()
canvas = FigureCanvasAgg(f)
canvas.print_png(buf)
    # Build the response, sending the bytes as a PNG image
response = HttpResponse(buf.getvalue(), content_type='image/png')
# Limpiamos la figura para liberar memoria
f.clear()
# Añadimos la cabecera de longitud de fichero para más estabilidad
response['Content-Length'] = str(len(response.content))
# Devolvemos la response
return response
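# Hypothetical wiring sketch (illustrative only; the project's actual urls.py,
# module path and route name are not shown in this file):
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('core/plot/', views.plot, name='plot')]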
|
# License: BSD 3 clause
import tick.base
from .hawkes_kernels import (HawkesKernel0, HawkesKernelExp,
HawkesKernelPowerLaw, HawkesKernelSumExp,
HawkesKernelTimeFunc)
from .simu_hawkes import SimuHawkes
from .simu_hawkes_exp_kernels import SimuHawkesExpKernels
from .simu_hawkes_multi import SimuHawkesMulti
from .simu_hawkes_sumexp_kernels import SimuHawkesSumExpKernels
from .simu_inhomogeneous_poisson import SimuInhomogeneousPoisson
from .simu_poisson_process import SimuPoissonProcess
__all__ = [
"SimuPoissonProcess", "SimuInhomogeneousPoisson", "SimuHawkes",
"SimuHawkesExpKernels", "SimuHawkesSumExpKernels", "SimuHawkesMulti",
"HawkesKernelExp", "HawkesKernelSumExp", "HawkesKernelPowerLaw",
"HawkesKernelTimeFunc", "HawkesKernel0"
]
|
import re
from typing import Dict, List, Optional, Union
from ufal.udpipe import InputFormat
from ufal.udpipe import Model
from ufal.udpipe import OutputFormat, ProcessingError, Sentence
from .utils import get_path
def _default_model_meta(lang: str, name: str) -> Dict:
return {
"author": "Milan Straka & Jana Straková",
"description": "UDPipe pretrained model.",
"email": "[email protected]",
"lang": f"udpipe_{lang}",
"license": "CC BY-NC-SA 4.0",
"name": name,
"parent_package": "spacy_udpipe",
"pipeline": [
"Tokenizer", "Tagger", "Lemmatizer", "Parser"
],
"source": "Universal Dependencies 2.5",
"url": "http://ufal.mff.cuni.cz/udpipe",
"version": "1.2.0"
}
class PretokenizedInputFormat:
"""Dummy tokenizer for pretokenized input.
Execution speed might be slow compared to other UDPipe tokenizers
due to pure Python implementation. Mocks InputFormat API to enable
plug-and-play behaviour.
"""
def setText(self, text: str) -> None:
"""Store text in iterable lines for tokenization.
text: string, where each sentence is on a line and tokens
are separated by tabs.
"""
self.lines = iter(text.split("\n"))
def nextSentence(self, sentence: Sentence, _: ProcessingError) -> bool:
"""Tokenize each line from stored lines and store tokens in sentence.
sentence: UDPipe container for storing tokens.
"""
try:
line = next(self.lines)
except StopIteration:
return False
tokens = line.split("\t")
num_tokens = len(tokens)
for i, token in enumerate(tokens):
word = sentence.addWord(token)
if i < num_tokens - 1 and re.match(r"\W", tokens[i + 1]):
# leave no space after current token iff next token
# is non-alphanumeric (i.e. punctuation)
word.setSpaceAfter(False)
return True
class UDPipeModel:
def __init__(
self,
lang: str,
path: Optional[str] = None,
meta: Optional[Dict] = None
):
"""Load UDPipe model for given language.
lang: ISO 639-1 language code or shorthand UDPipe model name.
path: Path to UDPipe model.
meta: Meta-information about the UDPipe model.
"""
path = path or get_path(lang=lang)
self.model = Model.load(path)
self._lang = lang.split("-")[0]
self._path = path
self._meta = meta or _default_model_meta(
self._lang, self._path.split("/")[-1]
)
def __reduce__(self):
# required for multiprocessing on Windows
return self.__class__, (self._lang, self._path, self._meta)
def __call__(
self,
text: Union[
str,
List[str],
List[List[str]]
]
) -> List[Sentence]:
"""Tokenize, tag and parse the text and return it in an UDPipe
representation.
text: Input text, can be presegmented or pretokenized:
str : raw text,
List[str] : presegmented text,
List[List[str]] : pretokenized text.
RETURNS: Processed sentences.
"""
sentences = self.tokenize(text)
for s in sentences:
self.tag(s)
self.parse(s)
return sentences
    def _read(self, text: str, input_format) -> List[Sentence]:
"""Convert the text to an UDPipe representation.
text: Input text.
input_format: Desired input format.
RETURNS: Processed sentences.
"""
input_format.setText(text)
error = ProcessingError()
sentences = []
sentence = Sentence()
while input_format.nextSentence(sentence, error):
sentences.append(sentence)
sentence = Sentence()
if error.occurred():
raise Exception(error.message)
return sentences
def tokenize(
self,
text: Union[
str,
List[str],
List[List[str]]
]
) -> List[Sentence]:
"""Tokenize input text.
text: Input text, can be presegmented or pretokenized:
str : raw text,
List[str] : presegmented text,
List[List[str]] : pretokenized text.
Note: both presegmented and pretokenized text can not contain
newline or tab characters.
RETURNS: Processed sentences.
"""
if isinstance(text, str):
tokenizer = self.model.newTokenizer(self.model.DEFAULT)
elif isinstance(text, list):
if isinstance(text[0], list):
text = "\n".join("\t".join(sent) for sent in text)
tokenizer = PretokenizedInputFormat()
else:
text = "\n".join(text)
tokenizer = self.model.newTokenizer(
self.model.TOKENIZER_PRESEGMENTED
)
else:
raise TypeError(
"\n".join(
(f"Input type is {type(text)}, but must be one:",
"str : raw text",
"List[str] : presegmented text",
"List[List[str]] : pretokenized text")
)
)
if not tokenizer:
raise Exception(
"The model does not have a tokenizer "
f"so it can not tokenize input: {text}"
)
return self._read(text=text, input_format=tokenizer)
def tag(self, sentence: Sentence) -> None:
"""Assign part-of-speech tags (inplace).
sentence: Input sentence.
"""
self.model.tag(sentence, self.model.DEFAULT)
def parse(self, sentence: Sentence) -> None:
"""Assign dependency parse relations (inplace).
sentence: Input sentence.
"""
self.model.parse(sentence, self.model.DEFAULT)
def read(self, text: str, in_format: str) -> List[Sentence]:
"""Load text in the given format and return it in an UDPipe
representation.
text: Text to load.
in_format: 'conllu'|'horizontal'|'vertical'.
RETURNS: Processed sentences.
"""
input_format = InputFormat.newInputFormat(in_format)
if not input_format:
raise Exception(f"Cannot create input format '{in_format}'")
return self._read(text=text, input_format=input_format)
def write(self, sentences: List[Sentence], out_format: str) -> str:
"""Write given sentences in the required output format.
sentences: Input ufal.udpipe.Sentence-s.
out_format: 'conllu'|'horizontal'|'vertical'.
RETURNS: Sentences formatted in the out_format.
"""
output_format = OutputFormat.newOutputFormat(out_format)
output = "".join([output_format.writeSentence(s) for s in sentences])
output += output_format.finishDocument()
return output
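# Usage sketch (illustrative, not part of the original module; "en" is a
# placeholder language code and constructing the model requires the matching
# UDPipe model file to be resolvable via get_path):
if __name__ == "__main__":
    nlp = UDPipeModel(lang="en")
    sentences = nlp("UDPipe is a trainable pipeline.")
    print(nlp.write(sentences, out_format="conllu"))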
|
import os
def attempt_already_made(function_name, dirname, new_args):
MAKE_ATTEMPT = False
# Construct filename from function and dirname.
filename = dirname + function_name + '_args.sobj'
special_comparisons = {'construct_all_odes' : construct_all_odes_cmp}
try:
old_args = load(filename)
except IOError:
        # Legacy code to support old data. Will be deprecated.
        # print("\nATTENTION! Please make sure that old data has been "
        #       + "reformatted. Trying again anyway...\n")
save(new_args, filename)
return MAKE_ATTEMPT
if function_name in special_comparisons:
comparison_function = special_comparisons[function_name]
else:
comparison_function = (lambda x,y : x['timeout'] > y['timeout'])
# The comparison function should return True if an attempt should be made.
if comparison_function(new_args, old_args):
save(new_args, filename)
return MAKE_ATTEMPT
else:
return not MAKE_ATTEMPT
def construct_all_odes_cmp(x,y):
if x['only_first'] == False and y['only_first'] == True:
return True
else:
return x['timeout'] > y['timeout']
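# Illustrative sketch (not part of the original module): how the comparison
# hooks decide whether a new attempt is warranted. The default rule retries
# only when the new timeout is larger; construct_all_odes_cmp also retries
# when switching from computing only the first ODE to computing all of them.
if __name__ == "__main__":
    default_cmp = lambda new, old: new['timeout'] > old['timeout']
    print(default_cmp({'timeout': 600}, {'timeout': 300}))  # True: retry
    print(construct_all_odes_cmp({'only_first': False, 'timeout': 300},
                                 {'only_first': True, 'timeout': 600}))  # True: retry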
|
somaIdade = 0
homemVelho = [0, 'nome']
mulher = 0
for p in range(1, 5):
print('=' * 10, f'{p}ª Pessoa', '=' * 10)
nome = str(input('Informe seu nome: ')).strip()
idade = int(input('Informe sua idade: '))
sexo = str(input('Qual o seu sexo?\nSexo [M/F]: ')).strip().upper()
somaIdade += idade
if sexo == 'M' and idade > homemVelho[0]:
homemVelho[0] = idade
homemVelho[1] = nome
elif sexo == 'F' and idade < 20:
mulher += 1
print(f'A média de idade do grupo é de {somaIdade / 4} anos')
print(f'O homem mais velho tem {homemVelho[0]} anos e seu nome é {homemVelho[1]}')
print(f'{mulher} mulheres têm menos de 20 anos')
|
from __future__ import division
import sys
from expr_utils import *
PATH = "" # Data path
if __name__ == "__main__":
# Command-line args:
# fname: 'ORL' or 'PEMS'
# rep: trial number
fname, rep = sys.argv[1:]
rep = int(rep)
# ORL data (400 * 10304)
# PEMS-SF data (440 * 138672)
A = np.load(PATH + "%s-data.npy" % fname)
b = np.load(PATH + "%s-labels.npy" % fname)
n, d = A.shape
num_iters = 50
num_cols_list = np.linspace(1000, 5000, 6) if fname == 'ORL' else \
np.linspace(5000, 10000, 6)
# Relative errors w.r.t. degrees of freedom
lmbd_list = [1., 2., 5., 10., 20., 50.]
for lmbd in lmbd_list:
print "lmbd = %s" % lmbd
res = expr_err(A, b, lmbd, num_cols_list, num_iters, seed=1230+rep)
# pckl_write(res, "%s-lmbd%d-rep%d.res" % (fname, lmbd, rep))
# Accuracy
num_iters_list = range(1, 11)
res = expr_acc(A, b, lmbd, num_iters_list,
num_cols=5000 if fname == 'ORL' else 10000,
num_reps=20)
# pckl_write(res, "%s-lmbd%d-acc.res" % (fname, lmbd))
|
mainpage="""/**
\mainpage {0}
This is the mainpage description for the "{0}" project as found in
mainpage.dox.
*/
"""
|
# Generated by Django 3.1.1 on 2020-09-21 18:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_product_image'),
('categories', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='products',
field=models.ManyToManyField(blank=True, to='products.Product'),
),
]
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'FormloginObXcwY.ui'
##
## Created by: Qt User Interface Compiler version 5.14.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if not MainWindow.objectName():
MainWindow.setObjectName(u"MainWindow")
MainWindow.resize(474, 682)
icon = QIcon()
icon.addFile(u":/img/logo.png", QSize(), QIcon.Normal, QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setWindowOpacity(1.000000000000000)
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(u"centralwidget")
self.horizontalLayout = QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.frame_Shadow = QFrame(self.centralwidget)
self.frame_Shadow.setObjectName(u"frame_Shadow")
self.frame_Shadow.setAutoFillBackground(False)
self.frame_Shadow.setStyleSheet(u"QFrame{\n"
"border:0px;\n"
" background: url(\":/img/bg.jpg\") no-repeat;\n"
" border-radius: 10px;\n"
"}\n"
"")
self.frame_Shadow.setFrameShape(QFrame.StyledPanel)
self.frame_Shadow.setFrameShadow(QFrame.Raised)
self.verticalLayout = QVBoxLayout(self.frame_Shadow)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.frame_Object = QFrame(self.frame_Shadow)
self.frame_Object.setObjectName(u"frame_Object")
self.frame_Object.setStyleSheet(u"QFrame{\n"
"border:0px;\n"
" background: none;\n"
"}")
self.frame_Object.setFrameShape(QFrame.StyledPanel)
self.frame_Object.setFrameShadow(QFrame.Raised)
self.pushButton_Login = QPushButton(self.frame_Object)
self.pushButton_Login.setObjectName(u"pushButton_Login")
self.pushButton_Login.setGeometry(QRect(160, 590, 131, 41))
font = QFont()
font.setFamily(u"Noto Sans CJK HK")
font.setBold(True)
font.setWeight(75)
self.pushButton_Login.setFont(font)
self.pushButton_Login.setCursor(QCursor(Qt.PointingHandCursor))
self.pushButton_Login.setStyleSheet(u"QPushButton{\n"
" border-radius: 15px;\n"
" background-color: rgb(255, 51, 102);\n"
" color:#fff;\n"
" font-size:15px;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(255, 50, 121);\n"
" border-radius: 15px;\n"
" border:1px solid rgb(255, 51, 102);\n"
"}\n"
"\n"
"")
self.lbl_NewUser = QLabel(self.frame_Object)
self.lbl_NewUser.setObjectName(u"lbl_NewUser")
self.lbl_NewUser.setGeometry(QRect(170, 540, 71, 17))
self.lbl_NewUser.setCursor(QCursor(Qt.PointingHandCursor))
self.lbl_NewUser.setStyleSheet(u"QLabel{\n"
"\n"
"color: rgb(255, 51, 102);\n"
"\n"
"\n"
"}\n"
"\n"
"QLabel:hover {\n"
" color: rgb(255, 50, 121);\n"
"}")
self.lbl_SignUp = QLabel(self.frame_Object)
self.lbl_SignUp.setObjectName(u"lbl_SignUp")
self.lbl_SignUp.setGeometry(QRect(240, 540, 54, 17))
self.lbl_SignUp.setFont(font)
self.lbl_SignUp.setStyleSheet(u"QLabel{\n"
"\n"
"color:#fff;\n"
"}")
self.lbl_UserLogin = QLabel(self.frame_Object)
self.lbl_UserLogin.setObjectName(u"lbl_UserLogin")
self.lbl_UserLogin.setGeometry(QRect(110, 330, 241, 51))
self.lbl_UserLogin.setStyleSheet(u"QLabel{\n"
"\n"
"color:#fff;\n"
"font-size:30px;\n"
"}")
self.lbl_UserLogin.setAlignment(Qt.AlignCenter)
self.lineEdit_Login = QLineEdit(self.frame_Object)
self.lineEdit_Login.setObjectName(u"lineEdit_Login")
self.lineEdit_Login.setGeometry(QRect(60, 400, 341, 51))
self.lineEdit_Login.setStyleSheet(u"QLineEdit {\n"
" border: 1px solid rgb(238, 238, 236);\n"
" border-radius: 20px;\n"
" padding: 15px;\n"
" background-color: #fff;\n"
" color: rgb(200, 200, 200);\n"
"}\n"
"QLineEdit:hover {\n"
" border: 1px solid rgb(186, 189, 182);\n"
"}\n"
"QLineEdit:focus {\n"
" border: 1px solid rgb(114, 159, 207);\n"
" color: rgb(100, 100, 100);\n"
"}")
self.lineEdit_Password = QLineEdit(self.frame_Object)
self.lineEdit_Password.setObjectName(u"lineEdit_Password")
self.lineEdit_Password.setGeometry(QRect(60, 470, 341, 51))
self.lineEdit_Password.setStyleSheet(u"QLineEdit {\n"
" border: 1px solid rgb(238, 238, 236);\n"
" border-radius: 20px;\n"
" padding: 15px;\n"
" background-color: #fff;\n"
" color: rgb(200, 200, 200);\n"
"}\n"
"QLineEdit:hover {\n"
" border: 1px solid rgb(186, 189, 182);\n"
"}\n"
"QLineEdit:focus {\n"
" border: 1px solid rgb(114, 159, 207);\n"
" color: rgb(100, 100, 100);\n"
"}")
self.frame_Logo = QFrame(self.frame_Object)
self.frame_Logo.setObjectName(u"frame_Logo")
self.frame_Logo.setGeometry(QRect(120, 70, 241, 211))
self.frame_Logo.setStyleSheet(u"QFrame{\n"
"border:0px;\n"
" background: url(\":/img/logo.png\");\n"
" background-repeat: no-repeat;\n"
" border-radius: 10px;\n"
"}\n"
"")
self.frame_Logo.setFrameShape(QFrame.StyledPanel)
self.frame_Logo.setFrameShadow(QFrame.Raised)
self.frame_TopBar = QFrame(self.frame_Object)
self.frame_TopBar.setObjectName(u"frame_TopBar")
self.frame_TopBar.setGeometry(QRect(0, 0, 456, 41))
self.frame_TopBar.setMinimumSize(QSize(456, 41))
self.frame_TopBar.setMaximumSize(QSize(456, 41))
self.frame_TopBar.setLayoutDirection(Qt.RightToLeft)
self.frame_TopBar.setStyleSheet(u"QFrame{\n"
"background-color:rgb(42, 42, 42);\n"
"}")
self.frame_TopBar.setFrameShape(QFrame.StyledPanel)
self.frame_TopBar.setFrameShadow(QFrame.Raised)
self.pushButton_Exit = QPushButton(self.frame_TopBar)
self.pushButton_Exit.setObjectName(u"pushButton_Exit")
self.pushButton_Exit.setGeometry(QRect(410, 5, 41, 31))
self.pushButton_Exit.setMinimumSize(QSize(41, 0))
self.pushButton_Exit.setMaximumSize(QSize(50, 50))
self.pushButton_Exit.setCursor(QCursor(Qt.PointingHandCursor))
self.pushButton_Exit.setLayoutDirection(Qt.RightToLeft)
self.pushButton_Exit.setStyleSheet(u"QPushButton{\n"
" border-radius: 15px;\n"
" background-color: rgb(46, 52, 54);\n"
" color:#fff;\n"
" font-size:15px;\n"
" text-align:center;\n"
"\n"
"}\n"
"QPushButton:hover {\n"
" background-color: rgb(255, 50, 121);\n"
"}\n"
"\n"
"")
self.pushButton_Exit.setText(u"X")
self.verticalLayout.addWidget(self.frame_Object)
self.horizontalLayout.addWidget(self.frame_Shadow)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QMetaObject.connectSlotsByName(MainWindow)
# setupUi
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u" Login User", None))
self.pushButton_Login.setText(QCoreApplication.translate("MainWindow", u"LOGIN", None))
self.lbl_NewUser.setText(QCoreApplication.translate("MainWindow", u"New User?", None))
self.lbl_SignUp.setText(QCoreApplication.translate("MainWindow", u"Sign Up", None))
self.lbl_UserLogin.setText(QCoreApplication.translate("MainWindow", u"User Login", None))
self.lineEdit_Login.setText("")
self.lineEdit_Login.setPlaceholderText(QCoreApplication.translate("MainWindow", u"Username", None))
self.lineEdit_Password.setText("")
self.lineEdit_Password.setPlaceholderText(QCoreApplication.translate("MainWindow", u"Password", None))
# retranslateUi
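# Illustrative launch sketch (not part of the generated UI file; assumes PySide2
# is installed and that the Qt resource module providing ':/img/*' has been
# compiled and imported elsewhere, otherwise the images are simply missing):
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    window = QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())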
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from workflows.models import State
from workflows.models import StateInheritanceBlock
from workflows.models import StatePermissionRelation
from workflows.models import StateObjectRelation
from workflows.models import TransitionObjectRelation
from workflows.models import Transition
from workflows.models import Workflow
from workflows.models import WorkflowObjectRelation
from workflows.models import WorkflowModelRelation
from workflows.models import WorkflowPermissionRelation
def retrieve_object_id_from_path(request):
#TODO: is there a better way ?
# ex: u'/admin/paintdb/recipe/203421/'
path_info = request.META['PATH_INFO']
object_id = int(path_info.strip('/').split('/')[-1])
return object_id
class StateAdmin(admin.ModelAdmin):
list_display = ['codename', 'name', 'transition_listing', 'workflow', 'state_position', 'description', ]
list_editable = ['state_position', 'description', ]
list_filter = ['workflow', ]
filter_horizontal = ['transitions', ]
search_fields = ['codename', 'name', ]
def transition_listing(self, obj):
try:
html = '<br />'.join([item.__unicode__() for item in obj.transitions.all()])
except:
html = ''
return html
transition_listing.short_description = _(u'transitions')
transition_listing.allow_tags = True
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'transitions':
try:
state = State.objects.get(id=retrieve_object_id_from_path(request))
queryset = state.workflow.transitions
except:
queryset = Transition.objects.all()
kwargs["queryset"] = queryset
return super(StateAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
class StateInline(admin.StackedInline):
model = State
filter_horizontal = ['transitions', ]
extra = 0
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'transitions':
try:
workflow = Workflow.objects.get(id=retrieve_object_id_from_path(request))
queryset = workflow.transitions
except:
queryset = Transition.objects.all()
kwargs["queryset"] = queryset
return super(StateInline, self).formfield_for_manytomany(db_field, request, **kwargs)
class WorkflowAdmin(admin.ModelAdmin):
inlines = [
StateInline,
]
list_display = ['name', 'initial_state', 'state_listing', 'transition_listing', ]
def state_listing(self, obj):
try:
html = '<br />'.join([item.__unicode__() for item in obj.states.all()])
except:
html = ''
return html
state_listing.short_description = _(u'states')
state_listing.allow_tags = True
def transition_listing(self, obj):
try:
html = '<br />'.join([item.__unicode__() for item in obj.transitions.all()])
except:
html = ''
return html
transition_listing.short_description = _(u'transitions')
transition_listing.allow_tags = True
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "initial_state":
try:
workflow = Workflow.objects.get(id=retrieve_object_id_from_path(request))
queryset = workflow.states.all()
except:
queryset = State.objects.all()
kwargs["queryset"] = queryset
return super(WorkflowAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TransitionAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'destination', 'direction', 'permission_listing', 'workflow', ]
list_filter = ['workflow', ]
def permission_listing(self, obj):
try:
html = '<br />'.join([item.__unicode__() for item in obj.permissions.all()])
except:
html = ''
return html
permission_listing.short_description = _(u'permissions')
permission_listing.allow_tags = True
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'destination':
try:
transition = Transition.objects.get(id=retrieve_object_id_from_path(request))
queryset = transition.workflow.states
except:
queryset = State.objects.all()
kwargs["queryset"] = queryset
return super(TransitionAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TransitionObjectRelationAdmin(admin.ModelAdmin):
list_display = ['datetime', 'content', 'state', 'user', ]
date_hierarchy = 'datetime'
search_fields = ['user__username', ]
class WorkflowModelRelationAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'content_type', 'workflow', ]
list_filter = ['workflow', ]
class WorkflowPermissionRelationAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'workflow', 'permission', ]
list_filter = ['workflow', ]
class StatePermissionRelationAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'state', 'permission', 'role', ]
class StateInheritanceBlockAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'state', 'permission', ]
admin.site.register(Workflow, WorkflowAdmin)
admin.site.register(State, StateAdmin)
admin.site.register(StateInheritanceBlock, StateInheritanceBlockAdmin)
admin.site.register(StateObjectRelation)
admin.site.register(StatePermissionRelation, StatePermissionRelationAdmin)
admin.site.register(Transition, TransitionAdmin)
admin.site.register(WorkflowObjectRelation)
admin.site.register(WorkflowModelRelation, WorkflowModelRelationAdmin)
admin.site.register(WorkflowPermissionRelation, WorkflowPermissionRelationAdmin)
admin.site.register(TransitionObjectRelation, TransitionObjectRelationAdmin)
|
import RPi.GPIO as GPIO
import time
leds = [20,21,4,17,27]
GPIO.setmode(GPIO.BCM)
for led in leds:
GPIO.setup(led,GPIO.OUT)
GPIO.output(led,GPIO.LOW)
i=0
while i <30:
for led in leds:
GPIO.output(led,GPIO.HIGH)
time.sleep(0.5)
GPIO.output(led,GPIO.LOW)
i+=1 # i = i + 1
GPIO.cleanup()
|
salario = float(input('Informe seu salário: '))
if salario > 1250:
ajuste = 0.10 * salario
ajuste += salario
print('O seu salário de R${:.2f} sofreu aumento de 10%. Reajuste salárial: R${:.2f}'.format(salario, ajuste))
else:
ajuste = 0.15 * salario
ajuste += salario
print('O seu salário de R${:.2f} sofreu aumento de 15%. Reajuste salárial: R${:.2f}'.format(salario, ajuste))
|
"""
aggregators
This file maintains methods for path aggregation
- Rakshit Agrawal, 2018
"""
import numpy as np
import tensorflow as tf
kl = tf.keras.layers
class Aggregators(object):
def __init__(self,
node_count,
path_lengths,
additional_embeddings=None,
ordered_args=None):
self.node_count = node_count
self.path_lengths = path_lengths
self.additional_embeddings = additional_embeddings
        self.ordered_args = ordered_args if ordered_args is not None else {}
# Set the problem for output layers
self.problem = self.ordered_args.get('problem', 'link')
def get_build_method(self, model_name):
model_ref = {
'avg_pool': self.build_mean_model,
'dense_max': self.build_dense_max_model,
'seq_of_seq': self.build_seq_of_seq_model,
'edge_conv': self.build_edge_conv_model
}
return model_ref.get(model_name, None)
def get_final_output_layer(self, n_classes=None):
""" Create final layer of model based on problem type. """
if n_classes is not None and isinstance(n_classes, int):
return kl.Dense(n_classes, activation='softmax', name='final_val')
if self.problem == 'link':
return kl.Dense(1, activation='sigmoid', name='final_val')
if self.problem == 'wsn':
if self.ordered_args.get('regression_only',False):
return kl.Dense(1, name='final_val')
return kl.Dense(1, activation='tanh', name='final_val')
def build_mean_model(self,
emb_dims=32,
dense_dims=32,
classifier_dims=32,
dropout=0.5,
known_embeddings=None,
show_summary=True):
""" Build a mean model """
if isinstance(dense_dims, dict):
assert set(dense_dims.keys()) == set(self.path_lengths.keys())
elif isinstance(dense_dims, int):
dense_dims = {i: dense_dims for i in self.path_lengths}
node_inp = kl.Input((2,), name='node_pair_input')
node_feature_values = []
if known_embeddings is None:
emb = kl.Embedding(input_dim=self.node_count,output_dim=emb_dims,
name='embedding_layer')
else:
assert isinstance(known_embeddings, kl.Embedding)
emb = known_embeddings
node_emb = emb(node_inp)
processed_node_pair = kl.Flatten()(node_emb)
node_feature_values.append(processed_node_pair)
if self.additional_embeddings is not None:
# Add node features through more embedding layers
if isinstance(self.additional_embeddings, list):
assert all([isinstance(i, np.ndarray) for i in self.additional_embeddings])
elif isinstance(self.additional_embeddings, np.ndarray):
self.additional_embeddings = [self.additional_embeddings]
else:
raise ValueError("Unknown embedding type provided.")
for i, emb_weights in enumerate(self.additional_embeddings):
emb_layer = kl.Embedding(input_dim=emb_weights.shape[0],
output_dim=emb_weights.shape[1],
weights=[emb_weights],
trainable=False,
name='node_features_{}'.format(i+1))
node_features = emb_layer(node_inp)
processed_features = kl.Flatten()(node_features)
node_feature_values.append(processed_features)
path_inps = {}
path_embs = {}
processed_paths = {}
for path_len in self.path_lengths:
path_inps[path_len] = kl.Input((None, path_len), name='path_%d_input' % path_len)
path_embs[path_len] = emb(path_inps[path_len])
processed_paths[path_len] = kl.TimeDistributed(
kl.Flatten(name='flatten_for_%d_paths' % path_len)
)(path_embs[path_len])
processed_paths[path_len] = kl.GlobalAveragePooling1D(name='final_mean_pool_for_%d_paths' % path_len)(
processed_paths[path_len])
        combined = kl.Concatenate()(node_feature_values + list(processed_paths.values()))
d2_out = kl.Dense(classifier_dims, name='dense_on_combined')(combined)
d2_out = kl.Dropout(dropout)(d2_out)
out = self.get_final_output_layer()(d2_out)
        model = tf.keras.Model(inputs=[node_inp] + list(path_inps.values()), outputs=out)
if show_summary:
model.summary()
return model
def build_dense_max_model(self,
emb_dims=32,
dense_dims=32,
classifier_dims=32,
dropout=0.5,
known_embeddings=None,
show_summary=True):
""" Build a dense max model """
if isinstance(dense_dims, dict):
assert set(dense_dims.keys()) == set(self.path_lengths.keys())
elif isinstance(dense_dims, int):
dense_dims = {i: dense_dims for i in self.path_lengths}
node_inp = kl.Input((2,), name='node_pair_input')
node_feature_values = []
if known_embeddings is None:
emb = kl.Embedding(input_dim=self.node_count, output_dim=emb_dims,
name='embedding_layer')
else:
assert isinstance(known_embeddings, kl.Embedding)
emb = known_embeddings
node_emb = emb(node_inp)
processed_node_pair = kl.Flatten()(node_emb)
node_feature_values.append(processed_node_pair)
if self.additional_embeddings is not None:
# Add node features through more embedding layers
if isinstance(self.additional_embeddings, list):
assert all([isinstance(i, kl.Embedding) for i in self.additional_embeddings])
elif isinstance(self.additional_embeddings, kl.Embedding):
self.additional_embeddings = [self.additional_embeddings]
else:
raise ValueError("Unkonwn embedding type provided.")
for emb_layer in self.additional_embeddings:
node_features = emb_layer(node_inp)
processed_features = kl.Flatten()(node_features)
node_feature_values.append(processed_features)
path_inps = {}
path_embs = {}
processed_paths = {}
for path_len in self.path_lengths:
path_inps[path_len] = kl.Input((None, path_len), name='path_%d_input' % path_len)
path_embs[path_len] = emb(path_inps[path_len])
processed_paths[path_len] = kl.TimeDistributed(
kl.Flatten(name='flatten_for_%d_paths' % path_len)
)(path_embs[path_len])
processed_paths[path_len] = kl.TimeDistributed(
kl.Dense(dense_dims[path_len], name='dense_for_%d_paths' % path_len),
name='td_dense_for_%d_paths' % path_len)(processed_paths[path_len])
processed_paths[path_len] = kl.Dropout(dropout)(processed_paths[path_len])
processed_paths[path_len] = kl.GlobalMaxPooling1D(name='final_max_pool_for_%d_paths' % path_len)(
processed_paths[path_len])
        combined = kl.Concatenate()(node_feature_values + list(processed_paths.values()))
d2_out = kl.Dense(classifier_dims, name='dense_on_combined')(combined)
d2_out = kl.Dropout(dropout)(d2_out)
out = self.get_final_output_layer()(d2_out)
        model = tf.keras.Model(inputs=[node_inp] + list(path_inps.values()), outputs=out)
if show_summary:
model.summary()
return model
def build_seq_of_seq_model(self,
emb_dims=32,
dense_dims=32,
classifier_dims=32,
dropout=0.5,
known_embeddings=None,
show_summary=True
):
""" Build a sequence of sequence model """
if isinstance(dense_dims, dict):
assert set(dense_dims.keys()) == set(self.path_lengths.keys())
elif isinstance(dense_dims, int):
dense_dims = {i: dense_dims for i in self.path_lengths}
node_inp = kl.Input((2,), name='node_pair_input')
node_feature_values = []
if known_embeddings is None:
emb = kl.Embedding(input_dim=self.node_count, output_dim=emb_dims,
name='embedding_layer')
else:
assert isinstance(known_embeddings, kl.Embedding)
emb = known_embeddings
node_emb = emb(node_inp)
processed_node_pair = kl.Flatten()(node_emb)
node_feature_values.append(processed_node_pair)
if self.additional_embeddings is not None:
# Add node features through more embedding layers
if isinstance(self.additional_embeddings, list):
assert all([isinstance(i, kl.Embedding) for i in self.additional_embeddings])
elif isinstance(self.additional_embeddings, kl.Embedding):
self.additional_embeddings = [self.additional_embeddings]
else:
raise ValueError("Unkonwn embedding type provided.")
for emb_layer in self.additional_embeddings:
node_features = emb_layer(node_inp)
processed_features = kl.Flatten()(node_features)
node_feature_values.append(processed_features)
path_inps = {}
path_embs = {}
processed_paths = {}
for path_len in self.path_lengths:
path_inps[path_len] = kl.Input((None, path_len), name='path_%d_input' % path_len)
path_embs[path_len] = emb(path_inps[path_len])
processed_paths[path_len] = kl.TimeDistributed(
kl.LSTM(dense_dims[path_len], return_sequences=True, name='lstm_for_%d_paths' % path_len),
name='td_lstm_for_%d_paths' % path_len)(path_embs[path_len])
processed_paths[path_len] = kl.Dropout(dropout)(processed_paths[path_len])
processed_paths[path_len] = kl.TimeDistributed(
kl.GlobalMaxPool1D(name='global_max_pool_for_%d_paths' % path_len),
name='td_for_global_max_pool_for_%d_paths' % path_len)(processed_paths[path_len])
            # Use a distinct name so layer names stay unique within the model.
            processed_paths[path_len] = kl.LSTM(dense_dims[path_len] * 2, return_sequences=True,
                                                name='outer_lstm_for_%d_paths' % path_len)(processed_paths[path_len])
processed_paths[path_len] = kl.GlobalMaxPooling1D(name='final_max_pool_for_%d_paths' % path_len)(
processed_paths[path_len])
        combined = kl.Concatenate()(node_feature_values + list(processed_paths.values()))
d2_out = kl.Dense(classifier_dims, name='dense_on_combined')(combined)
d2_out = kl.Dropout(dropout)(d2_out)
out = self.get_final_output_layer()(d2_out)
        model = tf.keras.Model(inputs=[node_inp] + list(path_inps.values()), outputs=out)
if show_summary:
model.summary()
return model
def build_edge_conv_model(self,
emb_dims=32,
dense_dims=32,
classifier_dims=32,
dropout=0.5,
known_embeddings=None,
show_summary=True
):
""" Build an edge conv model """
if isinstance(dense_dims, dict):
assert set(dense_dims.keys()) == set(self.path_lengths.keys())
elif isinstance(dense_dims, int):
dense_dims = {i: dense_dims for i in self.path_lengths}
node_inp = kl.Input((2,), name='node_pair_input')
node_feature_values = []
if known_embeddings is None:
emb = kl.Embedding(input_dim=self.node_count, output_dim=emb_dims,
name='embedding_layer')
else:
assert isinstance(known_embeddings, kl.Embedding)
emb = known_embeddings
node_emb = emb(node_inp)
processed_node_pair = kl.Flatten()(node_emb)
node_feature_values.append(processed_node_pair)
if self.additional_embeddings is not None:
# Add node features through more embedding layers
if isinstance(self.additional_embeddings, list):
assert all([isinstance(i, kl.Embedding) for i in self.additional_embeddings])
elif isinstance(self.additional_embeddings, kl.Embedding):
self.additional_embeddings = [self.additional_embeddings]
else:
raise ValueError("Unkonwn embedding type provided.")
for emb_layer in self.additional_embeddings:
node_features = emb_layer(node_inp)
processed_features = kl.Flatten()(node_features)
node_feature_values.append(processed_features)
path_inps = {}
path_embs = {}
processed_paths = {}
for path_len in self.path_lengths:
path_inps[path_len] = kl.Input((None, path_len), name='path_%d_input' % path_len)
path_embs[path_len] = emb(path_inps[path_len])
processed_paths[path_len] = kl.TimeDistributed(
kl.Conv1D(filters=emb_dims, kernel_size=2, strides=1,
name='conv_for_%d_paths' % path_len),
name='td_conv_for_%d_paths' % path_len)(path_embs[path_len])
processed_paths[path_len] = kl.Dropout(dropout)(processed_paths[path_len])
processed_paths[path_len] = kl.TimeDistributed(
kl.GlobalMaxPool1D(name='global_max_pool_for_%d_paths' % path_len),
name='td_for_global_max_pool_for_%d_paths' % path_len)(processed_paths[path_len])
processed_paths[path_len] = kl.LSTM(dense_dims[path_len] * 2, return_sequences=True,
name='lstm_for_%d_paths' % path_len)(processed_paths[path_len])
processed_paths[path_len] = kl.GlobalMaxPooling1D(name='final_max_pool_for_%d_paths' % path_len)(
processed_paths[path_len])
        combined = kl.Concatenate()(node_feature_values + list(processed_paths.values()))
d2_out = kl.Dense(classifier_dims, name='dense_on_combined')(combined)
d2_out = kl.Dropout(dropout)(d2_out)
out = self.get_final_output_layer()(d2_out)
        model = tf.keras.Model(inputs=[node_inp] + list(path_inps.values()), outputs=out)
if show_summary:
model.summary()
return model
if __name__ == "__main__":
# Test
ag = Aggregators(node_count=200, path_lengths=[3, 4])
model = ag.build_mean_model()
model = ag.build_dense_max_model()
model = ag.build_seq_of_seq_model()
model = ag.build_edge_conv_model()
|
import os
import wx
from pprint import pprint
# from ..Views.StatsPanel.PlayersPanel import NCAABPlayerStatsPanel, NBAPlayerStatsPanel
from ..Views.TeamPanels.StatsPanel import NCAABStatsPanel, NBAStatsPanel, MLBStatsPanel
from ..Views.GamePanels.GameLinePanel import GameLinePanel
from .GamePanels.TitlePanel import TitlePanel
# from ..Views.GamePanels.PredictPanel import PredictPanel
timeframeList = ["2weeks", "1month", "6weeks", "season"]
logoPath = os.environ["HOME"] + "/Yahoo/{}/logos/{}.png"
class MatchupPage(wx.Panel):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.titlePanel = TitlePanel(self)
        self.timeBox = wx.ComboBox(self, choices=timeframeList)
self.timeBox.SetSelection(0)
self.timeBox.Bind(wx.EVT_COMBOBOX, self.onChange)
self.mainPanel = wx.Notebook(self)
self.mainSizer = wx.BoxSizer(wx.VERTICAL)
self.mainPanel.SetSizer(self.mainSizer)
self.gameLinePanel = GameLinePanel(self.mainPanel)
# self.predictPanel = PredictPanel(self.mainPanel)
# self.predictPanel.setPanel({"regs": self.info["regs"], "teamStats": self.info["teamStats"]})
self.teamStatsPanel = self.getStatsPanel(self.mainPanel)
# self.playerStatsPanel = self.getPlayerPanel(self.mainPanel)
self.mainPanel.AddPage(self.gameLinePanel, "gL")
self.mainPanel.AddPage(self.teamStatsPanel, "teamS")
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.titlePanel, 0, wx.EXPAND)
sizer.Add(self.timeBox, 0, wx.CENTER)
sizer.Add(self.mainPanel, 1, wx.EXPAND)
self.SetSizer(sizer)
def setPanel(self, info):
self.info = info
tF = self.timeBox.GetStringSelection()
self.titlePanel.setPanel({"odds": info["odds"][-1]["99"], "details": info["teamDetails"], "records": info["teamRecords"][tF]})
self.gameLinePanel.setPanel({"games": self.info["gameLines"][tF], "homeId": self.info["homeId"], "awayId": self.info["awayId"], "commonOpp": self.info["commonOpp"][tF]})
self.teamStatsPanel.setPanel(self.info["teamStats"][tF])
self.Layout()
def onChange(self, event):
self.setPanel(self.info)
    def getStatsPanel(self, parent):
        raise NotImplementedError
    def getPlayerPanel(self, parent):
        raise NotImplementedError
class MLBMatchupPage(MatchupPage):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
def getStatsPanel(self, parent):
return MLBStatsPanel(parent)
class NBAMatchupPage(MatchupPage):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
def getStatsPanel(self, parent):
return NBAStatsPanel(parent)
|
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import os.path as path
def calibrate_pts(input_path):
    objpoints = []  # 3-D points in real-world space
    imgpoints = []  # 2-D points in the image plane
nx, ny = 9, 6
objp = np.zeros((nx * ny, 3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
jpg_files = path.join(input_path, '*.jpg')
imgs = glob.glob(jpg_files)
for fname in imgs:
img = mpimg.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret:
imgpoints.append(corners)
objpoints.append(objp)
return imgpoints, objpoints
def undistort(img, imgpts, objpts):
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpts, imgpts, img.shape[1::-1], None, None)
undistorted = cv2.undistort(img, mtx, dist, None, mtx)
return undistorted
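# Example pipeline sketch (illustrative; 'camera_cal' and 'test_images/test1.jpg'
# are placeholder paths, not taken from the original code):
if __name__ == "__main__":
    imgpts, objpts = calibrate_pts('camera_cal')
    test_img = mpimg.imread('test_images/test1.jpg')
    corrected = undistort(test_img, imgpts, objpts)
    plt.imshow(corrected)
    plt.show()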
|
from rest_framework import serializers
from wq.db.rest.serializers import ModelSerializer
from wq.db.patterns import serializers as patterns
from vera.models import Site
from .models import Parameter, Report
from vera.results.serializers import ResultSerializer as VeraResultSerializer
from vera.series.serializers import ReportSerializer as VeraReportSerializer
class ParameterInlineSerializer(patterns.AttachmentSerializer):
class Meta(patterns.AttachmentSerializer.Meta):
model = Parameter
exclude = ('campaign',)
object_field = 'campaign'
class CampaignSerializer(patterns.AttachedModelSerializer):
parameters = ParameterInlineSerializer(many=True, required=False)
icon = serializers.FileField(required=False)
def to_internal_value(self, data):
data = data.copy()
if 'request' in self.context:
user = self.context['request'].user
if user.is_authenticated():
data['creator_id'] = user.pk
return super(CampaignSerializer, self).to_internal_value(data)
def to_representation(self, obj):
result = super(CampaignSerializer, self).to_representation(obj)
result.pop('creator', None)
return result
class ResultSerializer(VeraResultSerializer):
class Meta(VeraResultSerializer.Meta):
type_filter = {
'campaign_id': '{{campaign_id}}',
}
class ReportSerializer(VeraReportSerializer):
results = ResultSerializer(many=True)
def to_representation(self, obj):
# FIXME: See https://github.com/wq/wq.db/issues/61
result = super(ReportSerializer, self).to_representation(obj)
if result.get('results', None):
return result
campaign_id = (
'request' in self.context and
self.context['request'].GET.get('campaign_id', None)
)
if not campaign_id:
return result
result['results'] = [
{
'@index': i,
'type': self.router.serialize(parameter),
'type_label': str(parameter),
'type_id': parameter.slug,
}
for i, parameter in enumerate(Parameter.objects.filter(
campaign__slug=campaign_id
))
]
return result
def to_internal_value(self, data):
# In vera, Site is usually managed as a separate table, but we want to create
# new sites on the fly for this demo.
if data.get('latitude', None) and data.get('longitude', None):
lat = float(data['latitude'])
lng = float(data['longitude'])
site, is_new = Site.objects.get_or_create(
name="%s, %s" % (round(lat, 3), round(lng, 3)),
latitude=lat,
longitude=lng,
)
data = data.copy()
data['event[site][slug]'] = site.slug
return super(ReportSerializer, self).to_internal_value(data)
class EventResultSerializer(serializers.Serializer):
parameter = serializers.ReadOnlyField(source="result_type.__str__")
units = serializers.ReadOnlyField(source="result_type.units")
value = serializers.ReadOnlyField(source="result_value")
def get_wq_config(self):
# FIXME: See https://github.com/wq/wq.db/issues/60
return {'form': []}
class EventSerializer(ModelSerializer):
latitude = serializers.ReadOnlyField(source='site.latitude')
longitude = serializers.ReadOnlyField(source='site.longitude')
valid_reports = ModelSerializer.for_model(
Report, include_fields="__all__"
)(many=True)
photo = serializers.SerializerMethodField()
results = EventResultSerializer(many=True, source='eventresult_set')
def get_photo(self, instance):
report = instance.valid_reports.exclude(photo='').first()
if report:
return report.photo.name
|
"""
Copyright 2022 IBM Corporation All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
# pylint: disable=global-statement,global-variable-not-assigned,invalid-name
from flask import Flask, Response, jsonify, request
app = Flask(__name__)
DEFAULT_ROOT_CONFIGS = {
"start_search": {"status_code": 200},
"results": {"status_code": 200},
"status": {"status_code": 200},
}
DEFAULT_SEARCHES = []
DEFAULT_SEARCH_DATA = []
route_configs = DEFAULT_ROOT_CONFIGS.copy()
searches = DEFAULT_SEARCHES.copy()
search_data = DEFAULT_SEARCH_DATA.copy()
# Configuration endpoints
@app.route("/conf/route_configs/<endpoint>", methods=["POST"])
def set_route_config(endpoint):
global route_configs
route_configs[endpoint] = request.json
return ""
@app.route("/conf/add_search_data", methods=["POST"])
def add_search_data():
global search_data
search_data = search_data + request.json
return ""
@app.route("/conf/reset", methods=["POST"])
def reset():
global route_configs
global searches
global search_data
route_configs = DEFAULT_ROOT_CONFIGS.copy()
searches = DEFAULT_SEARCHES.copy()
search_data = DEFAULT_SEARCH_DATA.copy()
return ""
# Mock endpoints
@app.route("/api/ariel/searches", methods=["POST"])
def start_search():
global route_configs
global searches
global search_data
conf = route_configs["start_search"]
if not conf["status_code"] == 200:
return Response("Failure!", status=conf["status_code"])
search_id = 0
for search in searches:
if search["id"] >= search_id:
search_id = search["id"] + 1
data = {}
if len(search_data) >= 1:
data = search_data.pop(0)
search = {"id": search_id, "data": data}
searches.append(search)
print(searches)
return jsonify({"search_id": search_id})
@app.route("/api/ariel/searches/<int:search_id>", methods=["GET"])
def get_status(search_id):
global route_configs
global searches
conf = route_configs["status"]
if not conf["status_code"] == 200:
return Response("Failure!", status=conf["status_code"])
print(searches)
for search in searches:
if search["id"] == search_id:
return jsonify({"status": "COMPLETED"})
return Response(f"No search found with id {search_id}", status=404)
@app.route("/api/ariel/searches/<int:search_id>/results", methods=["GET"])
def get_results(search_id):
global route_configs
global searches
conf = route_configs["results"]
if not conf["status_code"] == 200:
return Response("Failure!", status=conf["status_code"])
for search in searches:
if search["id"] == search_id:
return jsonify(search["data"])
return Response(f"No search found with id {search_id}", status=404)
if __name__ == "__main__":
context = ("server.cert", "server.key")
app.run(debug=True, ssl_context=context, host='0.0.0.0', port=443, threaded=False)
|
# file: mean_deviation.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Copyright © 2018 R.F. Smith <[email protected]>.
# SPDX-License-Identifier: MIT
# Created: 2018-04-21T19:07:27+0200
# Last modified: 2019-07-13T10:16:34+0200
"""
Calculate the mean deviation of samples.
See http://www.leeds.ac.uk/educol/documents/00003759.htm
This number is independent of the distribution.
"""
import statistics as stat
def amd(data):
"""
Calculate the absolute mean deviation of a sequence of numbers.
This calculates the mean of the sequence. Then all distances to the mean.
It returns the mean of those distances
Arguments:
data: A sequence of numbers.
Returns:
The absolute mean deviation.
"""
m = stat.mean(data)
diff = [abs(n - m) for n in data]
return stat.mean(diff)
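# Small usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    # For [1, 2, 3, 4] the mean is 2.5 and the distances to it are 1.5, 0.5,
    # 0.5 and 1.5, so the absolute mean deviation is (1.5+0.5+0.5+1.5)/4 = 1.0.
    print(amd([1, 2, 3, 4]))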
|
from tkinter import ttk
class Treeview(ttk.Treeview):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.heading("#0", text="Versions")
@property
def selected_item(self):
return self.item(self.selection())
def clean(self):
children = self.get_children()
if children:
for child in children:
self.delete(child)
def insert_items(self, items, highlight=None):
assert isinstance(items, (tuple, list))
for item in items:
if highlight:
if item == highlight:
item = '> ' + item
else:
item = ' ' + item
self.insert('', 0, text=item)
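# Minimal usage sketch (illustrative, not part of the original widget module):
if __name__ == "__main__":
    import tkinter as tk
    root = tk.Tk()
    tree = Treeview(root)
    tree.pack(fill="both", expand=True)
    # Marks "1.1.0" with the "> " prefix that insert_items adds to the highlight.
    tree.insert_items(["1.0.0", "1.1.0", "2.0.0"], highlight="1.1.0")
    root.mainloop()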
|
from Board import Board
from Ship import Ship
from SimulationResult import SimulationResult
from SimulationStatistics import SimulationStatistics
import functionalComponents
import random, copy
#Represents a game of Battleship. Using a board of placed ships, the engine tries to
#work out the ships' positions and sink them all, counting how many turns it takes.
#The fewer the turns, the better the engine.
class BattleshipEngine:
def __init__(self):
self.board = Board(8)
self.simulationResuts = {}
def PrintBoard(self):
self.board.PrintBoard()
def SetNewBoard(self):
#setup new board
self.board = Board(8)
#place 5 ships in random coordinates
self.board.PlaceShipAtRandomCoordinate(Ship(5, 'A'))
self.board.PlaceShipAtRandomCoordinate(Ship(4, 'B'))
self.board.PlaceShipAtRandomCoordinate(Ship(3, 'S'))
self.board.PlaceShipAtRandomCoordinate(Ship(3, 'S'))
self.board.PlaceShipAtRandomCoordinate(Ship(2, 'C'))
self.board.initalTileListState = copy.deepcopy(self.board.tileList)
#runs the Battleship simulations against a set number of attack strategies.
def StartBattleshipSimulation(self, iterations):
for x in range(0, iterations):
#start a new random board
self.SetNewBoard()
#start the simulation for the horizontal attack
simulationResult = self.HorizontalLinearAttackStrategy()
#add the results to the dictionary
self.simulationResuts[simulationResult.attackStrategy+'#'+str(x)] = simulationResult
#reset the board
self.board.PrintBoard()
self.board.ResetBoard()
self.board.PrintBoard()
#start the simulation for the vertical attack
simulationResult = self.VerticalLinearAttackStrategy()
#add the results to the dictionary
self.simulationResuts[simulationResult.attackStrategy+'#'+str(x)] = simulationResult
#reset the board
self.board.PrintBoard()
self.board.ResetBoard()
self.board.PrintBoard()
def DEVStartBattleshipSimulation(self, iterations):
self.SetNewBoard()
for x in range(0, iterations):
simulationResult = self.DiagonalHitScanAttackStratgy()
self.simulationResuts[simulationResult.attackStrategy+'#'+str(x)] = simulationResult
self.board.ResetBoard()
simulationResult = self.DiagonalLinearAttackStrategy()
self.simulationResuts[simulationResult.attackStrategy+'#'+str(x)] = simulationResult
self.board.ResetBoard()
simulationResult = self.RandomHitScanAttackStrategy()
self.simulationResuts[simulationResult.attackStrategy+'#'+str(x)] = simulationResult
self.board.ResetBoard()
simulationResult = self.VerticalLinearAttackStrategy()
self.simulationResuts[simulationResult.attackStrategy+'#'+str(x)] = simulationResult
self.board.ResetBoard()
simulationResult = self.HorizontalLinearAttackStrategy()
self.simulationResuts[simulationResult.attackStrategy+'#'+str(x)] = simulationResult
self.board.ResetBoard()
stats = SimulationStatistics(self.simulationResuts.values())
stats.PrintSimulationStatistics()
#allows the user to attack by entering coordinates
def AttackStrategyUserInput(self):
moves = 0
while(not self.board.CheckIfAllShipsSunk()):
print('Not sunk')
moves += 1
self.board.PrintBoard()
while True:
print('Enter a starting coordinate for the ship:')
x = input('Enter x coordinate: ')
y = input('Enter y coordinate: ')
                if(functionalComponents.CoordinateString(x,y) in self.board.tileList and (self.board.tileList[functionalComponents.CoordinateString(x,y)].code != self.board.missTileCode and self.board.tileList[functionalComponents.CoordinateString(x,y)].code != self.board.hitTileCode)):
break
y = int(y)
self.board.AttackBoard(functionalComponents.CoordinateString(x,y))
    #Attacks from an initial starting point, sweeping left to right
def HorizontalLinearAttackStrategy(self):
coordinateList = []
moves = 1
#calc starting point and make first attack
startingChar = 'A'
startingX = chr(ord(startingChar) + random.randint(0, self.board.size - 1))
startingY = random.randint(0, self.board.size - 1)
coordinateList.append(functionalComponents.CoordinateString(startingX, startingY))
self.board.AttackBoard(functionalComponents.CoordinateString(startingX, startingY))
x = str(startingX)
y = startingY
originalTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
#loop until all the ships are sunk
#calculate the next position to attack
while(not self.board.CheckIfAllShipsSunk()):
#self.board.PrintBoard()
currentTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
if(not currentTile.hasEastTile and not currentTile.hasSouthTile):
x = startingChar
y = 0
elif(not currentTile.hasEastTile):
x = chr(ord(x) + 1)
y = 0
else:
y += 1
coordinateList.append(functionalComponents.CoordinateString(x, y))
self.board.AttackBoard(functionalComponents.CoordinateString(x,y))
moves += 1
return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Horizontal Linear")
#attacks top to bottom, starting at a random point and moving down each row, then to the next column
def VerticalLinearAttackStrategy(self):
coordinateList = []
moves = 1
#calc starting point and make first attack
startingChar = 'A'
startingX = chr(ord(startingChar) + random.randint(0, self.board.size - 1))
startingY = random.randint(0, self.board.size - 1)
coordinateList.append(functionalComponents.CoordinateString(startingX, startingY))
self.board.AttackBoard(functionalComponents.CoordinateString(startingX, startingY))
x = str(startingX)
y = startingY
originalTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
#loop until all the ships are sunk
#calculate the next position to attack
while(not self.board.CheckIfAllShipsSunk()):
#self.board.PrintBoard()
currentTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
if(not currentTile.hasEastTile and not currentTile.hasSouthTile):
x = startingChar
y = 0
elif(not currentTile.hasSouthTile):
x = startingChar
y += 1
else:
x = chr(ord(x) + 1)
coordinateList.append(functionalComponents.CoordinateString(x, y))
self.board.AttackBoard(functionalComponents.CoordinateString(x, y))
moves += 1
return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Vertical Linear")
    #Randomly attacks coordinates until a hit registers, then attacks each adjacent tile until each direction registers a miss or runs off the board
def RandomHitScanAttackStrategy(self):
coordinateList = []
validCoordinateList = []
moves = 0
#set all adjacent flags to false until a hit is registered
checkNorth = False
checkSouth = False
checkWest = False
checkEast = False
currentCoordinate = ''
#build a list of all coordinates
availableCoordinates = self.board.GetAvailableCoordinateList()
#loop until all ships are sunk
while(not self.board.CheckIfAllShipsSunk()):
#if all check flags are set to false, calc a new random coordinate that is available
if(not checkNorth and not checkSouth and not checkWest and not checkEast):
currentCoordinate = random.choice(availableCoordinates)
initialCoordinate = currentCoordinate
elif(checkNorth):
while(checkNorth):
currentCoordinate = functionalComponents.MoveCoordinateNorth(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkNorth = self.board.tileList[currentCoordinate].hasNorthTile and functionalComponents.MoveCoordinateNorth(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkSouth):
while(checkSouth):
currentCoordinate = functionalComponents.MoveCoordinateSouth(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkSouth = self.board.tileList[currentCoordinate].hasSouthTile and functionalComponents.MoveCoordinateSouth(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkWest):
while(checkWest):
currentCoordinate = functionalComponents.MoveCoordinateWest(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkWest = self.board.tileList[currentCoordinate].hasWestTile and functionalComponents.MoveCoordinateWest(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkEast):
while(checkEast):
currentCoordinate = functionalComponents.MoveCoordinateEast(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkEast = self.board.tileList[currentCoordinate].hasEastTile and functionalComponents.MoveCoordinateEast(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
#set back to the original coordinate
currentCoordinate = initialCoordinate
#adjust check flags to the new coordinate
checkNorth = self.board.tileList[currentCoordinate].hasNorthTile and functionalComponents.MoveCoordinateNorth(currentCoordinate) in availableCoordinates
checkSouth = self.board.tileList[currentCoordinate].hasSouthTile and functionalComponents.MoveCoordinateSouth(currentCoordinate) in availableCoordinates
checkWest = self.board.tileList[currentCoordinate].hasWestTile and functionalComponents.MoveCoordinateWest(currentCoordinate) in availableCoordinates
checkEast = self.board.tileList[currentCoordinate].hasEastTile and functionalComponents.MoveCoordinateEast(currentCoordinate) in availableCoordinates
#attack with the generated coordinate
if(currentCoordinate in availableCoordinates):
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
moves += 1
return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Random Hitscan")
#starts with a random tile on the board. moves diagonally, down and to the left after each attack.
def DiagonalLinearAttackStrategy(self):
coordinateList = []
moves = 1
#calc starting point and make first attack
startingChar = 'A'
startingX = chr(ord(startingChar) + random.randint(0, self.board.size - 1))
startingY = random.randint(0, self.board.size - 1)
coordinateList.append(functionalComponents.CoordinateString(startingX, startingY))
self.board.AttackBoard(functionalComponents.CoordinateString(startingX, startingY))
x = str(startingX)
y = startingY
originalTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
#loop until all the ships are sunk
#calculate the next position to attack
while(not self.board.CheckIfAllShipsSunk()):
currentTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
if(not currentTile.hasWestTile and not currentTile.hasNorthTile):
x = startingChar
y += 1
elif(not currentTile.hasEastTile and not currentTile.hasSouthTile):
x = startingChar
y = 0
elif(not currentTile.hasWestTile and not currentTile.hasSouthTile):
x = chr(ord(startingChar) + 1)
y = self.board.size - 1
elif(not currentTile.hasSouthTile and functionalComponents.MoveCoordinateWest(currentTile.GetCoordiante()) != self.board.emptyTileCode):
x = chr(ord(startingChar) + y + 1)
y = self.board.size - 1
elif(not currentTile.hasWestTile):
y = ord(x) - ord(startingChar) + 1
x = startingChar
else:
x = chr(ord(x) + 1)
y -= 1
coordinateList.append(functionalComponents.CoordinateString(x, y))
self.board.AttackBoard(functionalComponents.CoordinateString(x,y))
moves += 1
return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Diagonal Linear")
def DiagonalHitScanAttackStratgy(self):
coordinateList = []
moves = 1
#calc starting point and make first attack
startingChar = 'A'
startingX = chr(ord(startingChar) + random.randint(0, self.board.size - 1))
startingY = random.randint(0, self.board.size - 1)
coordinateList.append(functionalComponents.CoordinateString(startingX, startingY))
self.board.AttackBoard(functionalComponents.CoordinateString(startingX, startingY))
x = str(startingX)
y = startingY
originalTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
validCoordinateList = []
#set all adjacent flags to false until a hit is registered
checkNorth = False
checkSouth = False
checkWest = False
checkEast = False
#build a list of all coordinates
availableCoordinates = self.board.GetAvailableCoordinateList()
#loop until all ships are sunk
while(not self.board.CheckIfAllShipsSunk()):
#if all check flags are set to false, calc a new random coordinate that is available
if(not checkNorth and not checkSouth and not checkWest and not checkEast):
currentTile = self.board.tileList[functionalComponents.CoordinateString(x,y)]
if(not currentTile.hasWestTile and not currentTile.hasNorthTile):
x = startingChar
y += 1
elif(not currentTile.hasEastTile and not currentTile.hasSouthTile):
x = startingChar
y = 0
elif(not currentTile.hasWestTile and not currentTile.hasSouthTile):
x = chr(ord(startingChar) + 1)
y = self.board.size - 1
elif(not currentTile.hasSouthTile and functionalComponents.MoveCoordinateWest(currentTile.GetCoordiante()) != self.board.emptyTileCode):
x = chr(ord(startingChar) + y + 1)
y = self.board.size - 1
elif(not currentTile.hasWestTile):
y = ord(x) - ord(startingChar) + 1
x = startingChar
else:
x = chr(ord(x) + 1)
y -= 1
currentCoordinate = functionalComponents.CoordinateString(x, y)
initialCoordinate = currentCoordinate
elif(checkNorth):
while(checkNorth):
currentCoordinate = functionalComponents.MoveCoordinateNorth(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkNorth = self.board.tileList[currentCoordinate].hasNorthTile and functionalComponents.MoveCoordinateNorth(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkSouth):
while(checkSouth):
currentCoordinate = functionalComponents.MoveCoordinateSouth(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkSouth = self.board.tileList[currentCoordinate].hasSouthTile and functionalComponents.MoveCoordinateSouth(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkWest):
while(checkWest):
currentCoordinate = functionalComponents.MoveCoordinateWest(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkWest = self.board.tileList[currentCoordinate].hasWestTile and functionalComponents.MoveCoordinateWest(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkEast):
while(checkEast):
currentCoordinate = functionalComponents.MoveCoordinateEast(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkEast = self.board.tileList[currentCoordinate].hasEastTile and functionalComponents.MoveCoordinateEast(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
#set back to the original coordinate
currentCoordinate = initialCoordinate
#adjust check flags to the new coordinate
checkNorth = self.board.tileList[currentCoordinate].hasNorthTile and functionalComponents.MoveCoordinateNorth(currentCoordinate) in availableCoordinates
checkSouth = self.board.tileList[currentCoordinate].hasSouthTile and functionalComponents.MoveCoordinateSouth(currentCoordinate) in availableCoordinates
checkWest = self.board.tileList[currentCoordinate].hasWestTile and functionalComponents.MoveCoordinateWest(currentCoordinate) in availableCoordinates
checkEast = self.board.tileList[currentCoordinate].hasEastTile and functionalComponents.MoveCoordinateEast(currentCoordinate) in availableCoordinates
#attack with the generated coordinate
if(currentCoordinate in availableCoordinates):
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
moves += 1
return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Diagonal Hitscan")
|
import abc
import pdb
from time import time
import helpers.vcommon as CM
from helpers.miscs import Miscs, MP
import settings
import data.prog
import data.symstates
import data.traces
import infer.inv
DBG = pdb.set_trace
mlog = CM.getLogger(__name__, settings.LOGGER_LEVEL)
class _Infer(metaclass=abc.ABCMeta):
"""
Base class for inference
"""
def __init__(self, symstates, prog):
assert symstates is None or \
isinstance(symstates, data.symstates.SymStates), symstates
assert isinstance(prog, data.prog.Prog), prog
self.symstates = symstates
self.inv_decls = prog.inv_decls
self.inp_decls = prog.inp_decls
self.prog = prog
@abc.abstractmethod
def gen(self):
pass
@classmethod
@abc.abstractmethod
def gen_from_traces(cls, traces, symbols):
"""
Generating invariants directly from traces
"""
pass
def get_traces(self, inps, dtraces):
"""
run inps to get new traces (and update them)
"""
assert isinstance(inps, data.traces.Inps) and inps, inps
assert isinstance(dtraces, data.traces.DTraces), dtraces
new_dtraces = self.prog.get_traces(inps)
new_dtraces = dtraces.merge(new_dtraces)
return new_dtraces
def check(self, dinvs, inps):
if self.symstates:
cexs, dinvs = self.symstates.check(dinvs, inps)
else:
# no symbolic states, not performing checking
assert False, "shouldn't get here"
for loc in dinvs:
for inv in dinvs[loc]:
inv.stat = infer.inv.Inv.UNKNOWN
cexs = {}
return cexs, dinvs
class _CEGIR(_Infer, metaclass=abc.ABCMeta):
"""
Find invs using a guess and check iterative CEGIR approach
"""
pass
class _Opt(_Infer, metaclass=abc.ABCMeta):
"""
Find upperbound of polynomial and min/max terms using an SMT solver optimizer
"""
def __init__(self, symstates, prog):
# need prog because symstates could be None
super().__init__(symstates, prog)
def gen(self):
locs = self.inv_decls.keys()
def _terms(loc):
return self.inv_decls[loc].symbolic
        # collect candidate terms for each loc (filtering happens in get_terms)
termss = [self.get_terms(_terms(loc)) for loc in locs]
dinvs = infer.inv.DInvs()
if not termss:
return dinvs
mlog.debug(f"checking upperbounds for {sum(map(len, termss))} "
f"terms at {len(locs)} locs")
refs = {
loc: {self.inv_cls(t.mk_le(self.IUPPER)): t for t in terms}
for loc, terms in zip(locs, termss)
}
ieqs = infer.inv.DInvs()
for loc in refs:
for inv in refs[loc].keys():
ieqs.setdefault(loc, infer.inv.Invs()).add(inv)
_, ieqs = self.check(ieqs, inps=None)
ieqs = ieqs.remove_disproved()
tasks = [(loc, refs[loc][t]) for loc in ieqs for t in ieqs[loc]]
mlog.debug(
f"inferring upperbounds for {len(tasks)} terms at {len(locs)} locs")
# computing convex hull
def f(tasks):
return [
(loc, term, self.symstates.maximize(
loc, self.to_expr(term), self.IUPPER))
for loc, term in tasks
]
wrs = MP.run_mp("optimizing upperbound", tasks, f, settings.DO_MP)
dinvs = infer.inv.DInvs()
for loc, term, v in wrs:
if v is None:
continue
inv = self.inv_cls(term.mk_le(v))
inv.set_stat(infer.inv.Inv.PROVED)
dinvs.setdefault(loc, infer.inv.Invs()).add(inv)
return dinvs
def get_terms(self, symbols):
terms = self.my_get_terms(symbols)
mlog.debug(f"{len(terms)} terms for {self.__class__.__name__}")
inps = set(self.inp_decls.names)
if settings.DO_FILTER and inps:
st = time()
excludes = self.get_excludes(terms, inps)
new_terms = [term for term in terms if term not in excludes]
Miscs.show_removed("filter terms", len(
terms), len(new_terms), time() - st)
terms = new_terms
return terms
@staticmethod
@abc.abstractmethod
def to_expr(term):
pass
@staticmethod
@abc.abstractmethod
def inv_cls(term):
pass
    @classmethod
    @abc.abstractmethod
    def my_get_terms(cls, symbols):
        pass
    @staticmethod
    @abc.abstractmethod
    def get_excludes(terms, inps):
        pass
@classmethod
def gen_from_traces(cls, traces, symbols):
"""
Compute convex hulls from traces
"""
assert isinstance(traces, data.traces.Traces), traces
assert isinstance(symbols, data.prog.Symbs), symbols
maxV = cls.IUPPER
minV = -1 * maxV
tasks = cls.my_get_terms(symbols.symbolic)
def f(tasks):
rs = [(term, int(max(term.eval_traces(traces))))
for term in tasks]
return rs
wrs = MP.run_mp("getting upperbounds", tasks, f, settings.DO_MP)
ps = []
for term, upperbound in wrs:
if minV <= upperbound <= maxV:
p = cls.inv_cls(term.mk_le(upperbound))
ps.append(p)
return ps
|
"""
:CRAPS又称花旗骰,是美国拉斯维加斯非常受欢迎的一种的桌上赌博游戏。
Craps赌 博游戏
玩家摇 两颗色子 如果第一次摇出7点或11点 玩家胜
如果摇 出2点 3点 12点 庄家胜 其他情况游戏继续
玩家再次要色子 如果摇出7点 庄家胜
如果摇 出第一次摇的点数 玩家胜
否则 游戏继续 玩家继续摇色子
玩家进入游戏时 有1000元的赌 注 全部输 光游戏 结束
"""
from random import randint
money = 1000
while money > 0:
    print('Your bankroll is:', money)
    needs_go_on = False
    while True:
        try:
            debt = int(input('Place your bet: '))
            if 0 < debt <= money:
                break
            print('The bet must be between 1 and your bankroll', end=', ')
        except ValueError:
            print('Invalid input', end=', ')
first = randint(1, 6) + randint(1, 6)
    print('The player rolled %d' % first)
    if first == 7 or first == 11:
        print('The player wins!')
        money += debt
    elif first == 2 or first == 3 or first == 12:
        print('The house wins!')
        money -= debt
else:
needs_go_on = True
while needs_go_on:
current = randint(1, 6) + randint(1, 6)
        print('The player rolled %d' % current)
        if current == 7:
            print('The house wins')
            money -= debt
            needs_go_on = False
        elif current == first:
            print('The player wins')
            money += debt
            needs_go_on = False
print('You are broke, game over!')
|
# Generated by Django 3.0.5 on 2020-05-12 10:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_core', '0118_calls_need_to_be_part_of_a_funding_instrument'),
('grant_management', '0029_signed_by_multiple_people'),
]
operations = [
migrations.RenameModel(
old_name='Media',
new_name='Medium',
),
migrations.AlterField(
model_name='grantagreement',
name='signed_by',
field=models.ManyToManyField(blank=True, help_text='People who signed the grant agreement', to='project_core.PhysicalPerson'),
),
]
|
# Copyright 2018 Twitter, Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
""" This module contains the main program for Caladrius and will set up all
resources and start the API server """
import os
import sys
import logging
import argparse
from typing import Dict, Any
from caladrius import logs
from caladrius import loader
from caladrius.api.router import create_router
LOG: logging.Logger = logging.getLogger("caladrius.main")
def _create_parser() -> argparse.ArgumentParser:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description=("This is the command line interface for the Caladrius API"
" server"))
parser.add_argument("-c", "--config", required=True,
help=("Path to the config file with data required by "
"all configured models and classes"))
parser.add_argument("-q", "--quiet", required=False, action="store_true",
help=("Optional flag indicating if console log output "
"should be suppressed"))
parser.add_argument("--debug", required=False, action="store_true",
help=("Optional flag indicating if debug level "
"information should be displayed"))
return parser
if __name__ == "__main__":
ARGS: argparse.Namespace = _create_parser().parse_args()
try:
CONFIG: Dict[str, Any] = loader.load_config(ARGS.config)
except FileNotFoundError:
print(f"Config file: {ARGS.config} was not found. Aborting...",
file=sys.stderr)
sys.exit(1)
else:
if not ARGS.quiet:
print("\nStarting Caladrius API...\n")
print(f"Loading configuration from file: {ARGS.config}")
if not os.path.exists(CONFIG["log.file.dir"]):
os.makedirs(CONFIG["log.file.dir"])
LOG_FILE: str = CONFIG["log.file.dir"] + "/app.log"
logs.setup(console=(not ARGS.quiet), logfile=LOG_FILE, debug=ARGS.debug)
try:
ROUTER = create_router(CONFIG)
except ConnectionRefusedError as cr_err:
if ARGS.quiet:
print(str(cr_err), file=sys.stderr)
sys.exit(1)
ROUTER.run(debug=ARGS.debug)
|
from PySide2.QtCore import QUrl, QDir
from PySide2.QtWebEngineWidgets import QWebEngineView
import sys
sys.path.append('./handsim')
from HandSimServer import *
def create_hand_sim_widget(parent=None):
sim_in_new_thread()
view = QWebEngineView(parent)
view.setUrl(QUrl.fromLocalFile(QDir.currentPath() + "/../handjs/index.html"))
return view
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_protect
from bookstore.models import Book
from .cart import Cart
from .forms import CartAddProductForm
from promos.forms import PromoCodeApplyForm
from bookstore.recommender import Recommender
@require_POST
def cart_add(request, book_id):
cart = Cart(request)
book = get_object_or_404(Book, id=book_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(book=book,
quantity=cd['quantity'],
update_quantity=cd['update'])
return redirect('cart:cart_detail')
def cart_remove(request, book_id):
cart = Cart(request)
book = get_object_or_404(Book, id=book_id)
cart.remove(book)
if cart:
return redirect('cart:cart_detail')
return redirect('bookstore:book_list')
# TODO: pass book quantity to form
@login_required(login_url='users:user_login')
def cart_detail(request):
cart = Cart(request)
for item in cart:
item['update_quantity_form'] = CartAddProductForm(
initial={'quantity': item['quantity'],
'update': True})
promo_code_apply_form = PromoCodeApplyForm()
r = Recommender()
if cart:
cart_books = [item['book'] for item in cart]
recommended_books = r.suggest_books_for(cart_books,
max_results=3)
return render(request,
'cart/detail.html',
{'cart': cart,
'promo_code_apply_form': promo_code_apply_form,
'recommended_books': recommended_books})
return render(request,
'cart/detail.html', locals())
|
from django.contrib import admin
from .models import Post, Comment, LikeModel, Follow
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ('user', 'picture', 'context')
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('post', 'user', 'context', 'parent', 'created_at')
@admin.register(LikeModel)
class LikeModelAdmin(admin.ModelAdmin):
list_display = ('post', 'user', 'status', 'created_at')
@admin.register(Follow)
class FollowAdmin(admin.ModelAdmin):
list_display = ('from_user', 'to_user', 'status', 'created_at')
|
from json import dump, load
import click
from giveaway.core.typing import UserName
from giveaway.core.usernames import (
filter_usernames,
prepare_username,
process_usernames,
)
from giveaway.core.winner import (
choose_winners,
find_winners,
verify_winner,
hash_username,
get_date_from_filename,
)
@click.group()
def cli():
"""
Program to choose a winner from a given list of participants. The seed used by PRNG depends only on a list of
participants and a date provided. So for every set of parameters the giveaway results can be reproduced.
"""
pass
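# A minimal end-to-end sketch (the script path and file names are hypothetical; the
# subcommands are the ones registered below):
#   python giveaway_cli.py prepare raw_usernames.txt participants.json
#   python giveaway_cli.py choose participants.json 31-12-2021 --n 2
#   python giveaway_cli.py prepare_hashed participants.json hashed_participants.json
#   python giveaway_cli.py verify_choice hashed_participants.json 31-12-2021 --username some_user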
@cli.command("prepare")
@click.argument("source", type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument("destination", type=click.Path(file_okay=True, dir_okay=False))
def prepare_original_cli(source, destination):
"""Parse a file with a of participants, clean it up and save to json"""
with open(source) as fp:
lines = (line for line in fp)
valid_lines = filter_usernames(lines)
prepared_usernames = [prepare_username(uname) for uname in valid_lines]
with open(destination, "w") as ofp:
dump(prepared_usernames, ofp)
@cli.command("choose")
@click.argument(
"participants", type=click.Path(exists=True, file_okay=True, dir_okay=False)
)
@click.argument("date", type=click.DateTime(formats=["%d-%m-%Y"]), required=False)
@click.option("--n", default=1, show_default=True)
def choose_winner_cli(participants, date, n):
"""Choose a winner from PARTICIPANTS while using DATE to count seed"""
if date is None:
date = get_date_from_filename(participants)
with open(participants) as fp:
raw_usernames = load(fp)
hashed_participants = process_usernames(raw_usernames)
hashed_winners = choose_winners(hashed_participants, date, n=n)
participants = [prepare_username(uname) for uname in raw_usernames]
winners = find_winners(hashed_winners, participants)
click.echo(" ".join(winners))
@cli.command("prepare_hashed")
@click.argument("source", type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument("destination", type=click.Path(file_okay=True, dir_okay=False))
def prepare_hashed_cli(source, destination):
"""Hash a list of participants from SOURCE and save to DESTINATION"""
with open(source) as fp:
raw_usernames = load(fp)
hashed_participants = process_usernames(raw_usernames)
with open(destination, "w") as ofp:
dump(hashed_participants, ofp)
@cli.command("verify_choice")
@click.argument(
"hashed_participants", type=click.Path(exists=True, file_okay=True, dir_okay=False)
)
@click.argument("date", type=click.DateTime(formats=["%d-%m-%Y"]), required=False)
@click.option(
"--username", default=None, help="username to compare with a winner's hash"
)
@click.option("--n", default=1, show_default=True)
def verify_choice_cli(hashed_participants, date, username, n):
"""
Verify choice using a HASHED_PARTICIPANTS file and DATE. Optionally you can provide a username to verify
that it was chosen.
"""
if date is None:
date = get_date_from_filename(hashed_participants)
with open(hashed_participants) as fp:
hashed_participants = load(fp)
hashed_winners = choose_winners(hashed_participants, date, n=n)
click.echo(f"Winners are: {', '.join(hashed_winners)}.")
if username:
prepared_username = prepare_username(username)
is_winner = any(verify_winner(prepared_username, hashed_winner) for hashed_winner in hashed_winners)
if is_winner:
click.echo(f"Yup! {username} is definitely a winner.")
else:
click.echo(
f"Unfortunately, {username} is not a winner. But don't worry, better luck next time!"
)
@cli.command("verify_participant")
@click.argument(
"hashed_participants", type=click.Path(exists=True, file_okay=True, dir_okay=False)
)
@click.argument("username", type=UserName)
def verify_participant_cli(hashed_participants, username):
"""
Verify given USERNAME is present in a HASHED_PARTICIPANTS file.
"""
with open(hashed_participants) as fp:
hashed_participants = load(fp)
hashed_username = hash_username(prepare_username(username))
click.echo(f"Hashed username: {hashed_username}")
is_in_participants = hashed_username in hashed_participants
if is_in_participants:
click.echo(
f"{username} may not be a winner. But it is present in a list of participants."
)
else:
click.echo(f"{username} is a creep and does not belong here :(")
|
import torch
import shutil
import numpy as np
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
# import cv2
from skimage.transform import resize
import torchvision.transforms as transforms
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
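# Minimal usage sketch (values are illustrative): keep a count-weighted running average,
# e.g. of a per-batch loss.
#   losses = AverageMeter()
#   losses.update(0.73, n=32)  # n is typically the batch size
#   losses.update(0.61, n=32)
#   print(losses.avg)          # count-weighted mean over all updates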
def save_checkpoint(state, filename='checkpoint.pth.tar'):
torch.save(state, filename)
def adjust_learning_rate(optimizer, epoch, args, interval):
"""Sets the learning rate to the initial LR decayed by 10 every 100 epochs"""
lr = args.lr
if epoch < interval[0]:
lr = args.lr
elif epoch >= interval[0] and epoch < interval[1]:
lr = args.lr * 0.1
else:
lr = args.lr * 0.01
#lr = args.lr * (0.1 ** (epoch // 100))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
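# Worked example (assuming args.lr == 0.1 and interval == (100, 200)):
#   epoch <  100        -> lr = 0.1
#   100 <= epoch < 200  -> lr = 0.01
#   epoch >= 200        -> lr = 0.001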
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
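# Usage sketch (shapes are assumptions): for logits `output` of shape (N, C) and integer
# labels `target` of shape (N,):
#   top1, top5 = accuracy(output, target, topk=(1, 5))
# each result is a 1-element tensor holding the accuracy as a percentage of the batch.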
def multi_class_auc(all_target, all_output, num_c = None):
from sklearn.preprocessing import label_binarize
# all_output = np.stack(all_output)
all_target = label_binarize(all_target, classes=list(range(0, num_c)))
all_output = label_binarize(all_output, classes=list(range(0, num_c)))
auc_sum = []
for num_class in range(0, num_c):
try:
auc = roc_auc_score(all_target[:, num_class], all_output[:, num_class])
auc_sum.append(auc)
except ValueError:
pass
auc = sum(auc_sum) / (float(len(auc_sum))+1e-8)
return auc
def evaluation_metrics(label, pred, C):
if C==2:
auc = roc_auc_score(label, pred)
else:
auc = multi_class_auc(label, pred, num_c=C)
corrects = np.equal(np.array(label), np.array(pred))
acc = float(sum(corrects)) / len(corrects)
# mean class
precision = precision_score(label, pred, average='macro')
recall = recall_score(label, pred, average='macro')
f1score = f1_score(label, pred, average='macro')
return round(auc, 4), round(acc, 4), round(precision, 4), round(recall, 4), round(f1score, 4)
def showfeature(x, savename):
# trun to numpy
x = x.data.cpu().numpy()
print (x.shape)
box = []
for item in range(0, x.shape[0]):
x_patch = x[item, :, :]
box.append(x_patch)
x_patch = np.stack(box)
x_patch = np.max(x_patch, axis=0)
x_patch = resize(x_patch, (224, 224), order=3, mode='constant',
cval=0, clip=True, preserve_range=True)
x_patch = (x_patch - np.min(x_patch)) / (np.max(x_patch) - np.min(x_patch) + 1e-11)
x_patch = x_patch * 255
x_patch = np.array(x_patch, dtype="uint8")
plt.plot(1), plt.imshow(x_patch, cmap='jet')
plt.axis('off')
plt.savefig(savename, bbox_inches='tight', pad_inches=0)
def showimage(x, savename):
import torchvision.transforms as transforms
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
z = x * torch.tensor(std).view(3, 1, 1).cuda()
z = z + torch.tensor(mean).view(3, 1, 1).cuda()
z = z.cpu()
z = z[[2,1,0], : ,:]
img2 = transforms.ToPILImage()(z)
img2.save(savename)
def get_color_distortion(s=1.0):
color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)
rnd_gray = transforms.RandomGrayscale(p=0.2)
color_distort = transforms.Compose([rnd_color_jitter, rnd_gray])
return color_distort
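# Usage sketch (composition with standard torchvision transforms; the crop size is illustrative):
#   train_transform = transforms.Compose([
#       transforms.RandomResizedCrop(224),
#       get_color_distortion(s=0.5),
#       transforms.ToTensor(),
#   ])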
def gaussian_blur(x):
from PIL.ImageFilter import GaussianBlur
if np.random.randint(0, 2) == 1:
x = x.filter(GaussianBlur(radius=np.random.uniform(0.1, 2.0)))
return x
|
from termcolor import cprint, colored
from random import randint
from time import sleep
cprint('Printo para jogar Pedra Papel Tesoura com o computador?', 'yellow')
a = colored('-=-' * 20, 'cyan')
print(a)
p1 = str(input(colored('Pedra, Papel... Tesoura! (escreve o que queres jogar) ', 'magenta',))).strip().lower()
print(a)
sleep(1.5)
pc = randint(1, 3)  # 1 = pedra (rock), 2 = papel (paper), 3 = tesoura (scissors)
if pc == 1:
jpc = 'pedra'
elif pc == 2:
jpc = 'papel'
elif pc == 3:
jpc = 'tesoura'
cprint('(Tu fizeste {} e o computador fez {})'.format(p1, jpc), 'grey', attrs=['concealed'])
d = colored('O computador ganhou!', 'green')
v = colored('Ganhaste ao computador, parabéns!', 'green')
e = colored('Empate', 'yellow')
if p1 == 'tesoura':
if pc == 1:
r = d
elif pc == 2:
r = v
elif pc == 3:
r = e
if p1 == 'pedra':
if pc == 2:
r = d
elif pc == 3:
r = v
elif pc == 1:
r = e
if p1 == 'papel':
if pc == 3:
r = d
elif pc == 1:
r = v
elif pc == 2:
r = e
if p1 != 'tesoura' and p1 != 'pedra' and p1 != 'papel':
cprint('ERRO', 'red', attrs=['underline'])
else:
print(r)
|
from augustine_text.sample_text import words
# 75K words of procedurally generated text
# This is about the length of a novel.
text = words(75000)
text_length = len(text)
print(text[:100])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# hello-world.py
# MIT License
# Copyright (c) <2021> <[email protected]>
# This is a test program using glade and gtk through pygobject
#TODO: Figure out why it goes transparent every two color flashes.
# I've found that both gnome-terminal and byobu terminal have the transparency while guake does not.
# Maybe guake is the odd one.
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from MyWindow import MyWindow
def main(args):
theWindow = MyWindow()
Gtk.main()
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
from datetime import datetime, timedelta
from jinja2 import Template as JinjaTemplate
from typing import Any, Callable, Dict, Tuple, Union
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.event import TrackTemplate, TrackTemplateResult, async_track_template_result
from homeassistant.helpers.template import LoggingUndefined, Template, TemplateEnvironment
from homeassistant.helpers.typing import ConfigType
from .. import const as co
from .store import ReactionEntry
from .dispatcher import get as Dispatcher
from .common import Updatable, Unloadable, callable_type
RUNTIME_VALUES = ["actor"]
class RuntimeValueUsedError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
class NonLoggingUndefined(LoggingUndefined):
def _fail_with_undefined_error(self, *args, **kwargs):
if self._undefined_name in RUNTIME_VALUES:
raise RuntimeValueUsedError()
else:
raise Exception()
class ValidateEnv(TemplateEnvironment):
def __init__(self, hass, limited=False, strict=False):
super().__init__(hass, limited, strict)
self.undefined = NonLoggingUndefined
class TemplateWatcher(Updatable):
def __init__(self, hass: HomeAssistant, owner: Any, property: str, type_converter: Any, template: Template, variables: dict):
super().__init__(hass)
self.owner = owner
self.property = property
self.type_converter = type_converter
self.template = template
self.static_variables = variables
template.hass = hass
setattr(owner, property, None)
self.needs_runtime_values = False
def template_needs_runtime_values(template: Template, kwargs: dict):
result = False
if not(template.is_static):
try:
validate_env = ValidateEnv(hass, False, False)
validate_template = validate_env.compile(template.template)
jinja_template = JinjaTemplate.from_code(validate_env, validate_template, validate_env.globals, None)
jinja_template.render(**kwargs)
except RuntimeValueUsedError:
result = True
except Exception:
result = False
return result
self.needs_runtime_values = template_needs_runtime_values(template, self.static_variables)
self.runtime_variables = self.static_variables | (co.RUNTIME_VARIABLES if self.needs_runtime_values else {})
self.result_info = async_track_template_result(hass, [TrackTemplate(template, self.runtime_variables)], self.async_update_template)
self.async_remove = self.result_info.async_remove
self.async_refresh = self.result_info.async_refresh
self.result_info.async_refresh()
@callback
def async_update_template(self, event: Union[Event, None], updates: list[TrackTemplateResult]):
if updates and len(updates) > 0:
result = updates.pop().result
if isinstance(result, TemplateError):
co.LOGGER.error("Config", "Error rendering {}: {}", self.property, result)
return
if hasattr(self.owner, "set_property"):
self.owner.set_property(self.property, self.type_converter(result))
else:
self.owner.__setattr__(self.property, self.type_converter(result))
self.async_update()
def runtime_value(self, additional_variables: dict):
runtime_variables = self.static_variables | additional_variables
result = None
try:
result = self.template.async_render(runtime_variables)
except TemplateError:
co.LOGGER("Could not evaluate runtime value for '{}'".format(self.property))
return result
class WorkflowContext(Updatable, Unloadable):
_on_unload: Union[list[callable_type], None] = None
_templates_with_variables: Union[list[TemplateWatcher], None] = None
def __init__(self, hass: HomeAssistant, workflow_id: str) -> None:
super().__init__(hass)
self.hass = hass
self.workflow_id = workflow_id
self._templates_with_variables = []
self.on_unload(self._templates_with_variables.clear) # Make sure the list of watchers is cleared when context is unloaded
def load_variables(self, variables_config: dict):
self.variables = type('object', (), {})()
for key,value in variables_config.items():
if isinstance(value, Template):
self.create_variable_template_watcher(self.variables, key, co.PROP_TYPE_SOURCE, value)
else:
setattr(self.variables, key, value)
def create_template_watcher(self, owner: Any, property: str, type_converter: Any, template: Template, use_variables: bool = True):
result = TemplateWatcher(self.hass, owner, property, type_converter, template, vars(self.variables) if use_variables else {})
result.on_update(self.async_update) # When the watcher gets updated the context should be updated
self.on_unload(result.async_remove) # When the context is unloaded the watcher should be unloaded
if use_variables:
self._templates_with_variables.append(result)
return result
def create_variable_template_watcher(self, owner: Any, property: str, type_converter: Any, template: Template):
result = self.create_template_watcher(owner, property, type_converter, template, False)
result.on_update(self.async_shake) # When the watcher of a variable gets updated all depending watchers should be updated
return result
@callback
def async_shake(self):
for watcher in self._templates_with_variables:
self.hass.add_job(watcher.async_refresh)
class PropertyContainer:
def __init__(self, context: WorkflowContext):
self.context = context
self.watchers_with_need_for_runtime_values: dict[str, TemplateWatcher] = {}
def init_property(self, name: str, type_converter: Any, config: dict, stencil: dict, default: Any = None):
value = self.get_property(name, config, stencil, default)
self.init_property_value(name, type_converter, value)
def init_property_value(self, name: str, type_converter: Any, value: Any):
if isinstance(value, Template):
watcher = self.context.create_template_watcher(self, name, type_converter, value)
if watcher.needs_runtime_values:
self.watchers_with_need_for_runtime_values[name] = watcher
else:
self.set_property(name, type_converter(value))
def get_property(self, name: str, config: dict, stencil: dict, default: Any = None):
result = config.get(name, None) if config else None
if not result and stencil:
stencil_value = stencil.get(name, None)
if isinstance(stencil_value, list):
result = stencil_value[:]
else:
result = stencil_value
if result is None:
result = default
return result
def set_property(self, name: str, value: Any):
if hasattr(self, name) and getattr(self, name) == value:
return
setattr(self, name, value)
entity = getattr(self, co.ATTR_ENTITY, None)
type = getattr(self, co.ATTR_TYPE, None)
if entity and type:
Dispatcher(self.context.hass).send_signal(co.SIGNAL_PROPERTY_COMPLETE, entity, type)
class Schedule(PropertyContainer):
def __init__(self, context: WorkflowContext, config: dict, stencil: dict):
super().__init__(context)
if not (config or stencil): return
self.at = self.get_property(co.ATTR_SCHEDULE_AT, config, stencil)
self.weekdays = self.get_property(co.ATTR_SCHEDULE_WEEKDAYS, config, stencil, [])
def as_dict(self) -> dict:
result = {
co.ATTR_SCHEDULE_AT: self.at.strftime("%H:%M:%S"),
}
if self.weekdays:
result[co.ATTR_SCHEDULE_WEEKDAYS] = self.weekdays
return result
class Ctor(PropertyContainer):
entity: str
type: str
action: str
def __init__(self, context: WorkflowContext, id: str, entity: Union[str, Template]):
super().__init__(context)
self.id = id
self.init_property_value(co.ATTR_ENTITY, co.PROP_TYPE_STR, entity)
def load(self, config: Any, stencil: dict):
self.init_property(co.ATTR_TYPE, co.PROP_TYPE_STR, config, stencil)
self.init_property(co.ATTR_ACTION, co.PROP_TYPE_STR, config, stencil)
self.init_property(co.ATTR_CONDITION, co.PROP_TYPE_BOOL, config, stencil, True)
def runtime_value(self, name: str, backup: Any = None) -> Any:
result = None
if hasattr(self, name):
result = getattr(self, name)
if not result and backup and hasattr(backup, name):
result = getattr(backup, name)
return result
def as_dict(self) -> dict:
return {
a: getattr(self, a)
for a in [co.ATTR_ENTITY, co.ATTR_TYPE, co.ATTR_ACTION, co.ATTR_CONDITION]
if getattr(self, a) is not None
}
class RuntimeActor:
id: str
entity: str
type: str
action: str
condition: bool
class Actor(Ctor):
def __init__(self, context: WorkflowContext, id: str, entity: Union[str, Template]):
super().__init__(context, id, entity)
def load(self, config: Any, stencil: dict):
co.LOGGER.info("Config", "'{}' loading actor: '{}'", self.context.workflow_id, self.id)
return super().load(config, stencil)
def to_runtime(self, reaction: ReactionEntry) -> RuntimeActor:
result = RuntimeActor()
for attr in [co.ATTR_ID, co.ATTR_CONDITION, co.ATTR_ENTITY, co.ATTR_TYPE, co.ATTR_ACTION]:
setattr(result, attr, self.runtime_value(attr, reaction))
return result
class RuntimeReactor:
id: str
entity: str
type: str
action: str
timing: str
delay: int
schedule: Any
overwrite: bool
reset_workflow: str
forward_action: str
condition: bool
class Reactor(Ctor):
timing: str
delay: int
overwrite: bool
reset_workflow: str
forward_action: bool
def __init__(self, context: WorkflowContext, id: str, entity: Union[str, Template]):
super().__init__(context, id, entity)
def load(self, config: dict, stencil: dict):
co.LOGGER.info("Config", "'{}' loading reactor: '{}'", self.context.workflow_id, self.id)
super().load(config, stencil)
self.timing = self.get_property(co.ATTR_TIMING, config, stencil, 'immediate')
self.init_property(co.ATTR_DELAY, co.PROP_TYPE_INT, config, stencil)
self.schedule = self.load_schedule(config, stencil)
self.init_property(co.ATTR_OVERWRITE, co.PROP_TYPE_BOOL, config, stencil, False)
self.init_property(co.ATTR_RESET_WORKFLOW, co.PROP_TYPE_STR, config, stencil)
self.init_property(co.ATTR_FORWARD_ACTION, co.PROP_TYPE_BOOL, config, stencil, False)
def load_schedule(self, config: dict, stencil: dict) -> Schedule:
if co.ATTR_SCHEDULE in config or co.ATTR_SCHEDULE in stencil:
return Schedule(self.context, config.get(co.ATTR_SCHEDULE, None), stencil.get(co.ATTR_SCHEDULE, None))
return None
def calculate_reaction_datetime(self):
if (self.timing == co.REACTOR_TIMING_IMMEDIATE):
return None
if self.timing == co.REACTOR_TIMING_DELAYED:
return datetime.now() + timedelta(seconds = self.delay)
elif self.timing == co.REACTOR_TIMING_SCHEDULED:
return self.calculate_next_schedule_hit()
def calculate_next_schedule_hit(self):
if not self.schedule or not self.schedule.at: return None
at = self.schedule.at
weekdays = self.schedule.weekdays
now = datetime.now()
next_try = datetime(now.year, now.month, now.day, at.hour, at.minute, at.second)
if next_try < now:
next_try = next_try + timedelta(days=1)
if weekdays and len(weekdays) > 0:
attempt = 1
while True:
day_name = next_try.strftime("%A")[0:3].lower()
if day_name in weekdays:
break
else:
next_try = next_try + timedelta(days=1)
attempt += 1
if (attempt > 7): raise Exception("could not calculate next schedule hit")
return next_try
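    # Worked example (illustrative): with at == 09:00:00 and weekdays == ["mon"], a call
    # made on a Friday at 10:00 starts from Friday 09:00, moves to Saturday 09:00 because
    # that time has already passed, then advances day by day until the three-letter day
    # name matches "mon", returning the following Monday at 09:00:00.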
def to_runtime(self, actor: RuntimeActor) -> RuntimeReactor:
result = RuntimeReactor()
for attr in [co.ATTR_ID, co.ATTR_CONDITION, co.ATTR_ENTITY, co.ATTR_TYPE, co.ATTR_ACTION, co.ATTR_TIMING, co.ATTR_DELAY, co.ATTR_SCHEDULE, co.ATTR_OVERWRITE, co.ATTR_RESET_WORKFLOW, co.ATTR_FORWARD_ACTION]:
setattr(result, attr, self.runtime_value(attr, actor))
return result
def runtime_value(self, name: str, actor: Actor) -> Any:
if name in self.watchers_with_need_for_runtime_values:
runtime_values = {
co.ATTR_ACTOR: actor.__dict__
}
return self.watchers_with_need_for_runtime_values[name].runtime_value(runtime_values)
else:
return super().runtime_value(name)
def as_dict(self) -> dict:
base_dict = super().as_dict()
self_dict = {
a: getattr(self, a)
for a in [co.ATTR_TIMING, co.ATTR_DELAY, co.ATTR_OVERWRITE, co.ATTR_RESET_WORKFLOW, co.ATTR_FORWARD_ACTION]
if getattr(self, a) is not None and getattr(self, a) != False
}
if self.schedule:
            self_dict[co.ATTR_SCHEDULE] = self.schedule.as_dict()
return base_dict | self_dict
ctor_type = Callable[[WorkflowContext, str, str], Union[Actor, Reactor] ]
class Workflow(PropertyContainer):
def __init__(self, context: WorkflowContext, config: dict):
super().__init__(context)
self.id = context.workflow_id
self.entity_id = co.ENTITY_ID_FORMAT.format(context.workflow_id)
self.stencil = config.get(co.ATTR_STENCIL, None)
self.friendly_name = config.get(co.ATTR_FRIENDLY_NAME, None)
self.icon = config.get(co.CONF_ICON, None)
def load(self, config, stencil):
self.actors: dict[str, Actor] = self.load_items(config, stencil, co.ATTR_ACTOR, Actor)
self.reactors: dict[str, Reactor] = self.load_items(config, stencil, co.ATTR_REACTOR, Reactor)
def load_items(self, config: Any, stencil: dict, item_property: str, item_type: ctor_type) -> Dict[str, Union[Actor, Reactor]]:
        if not config: return {}
items_config = self.get_property(item_property, config, None, {})
items_stencil = stencil.get(item_property, {})
result = {}
for id,item_config in items_config.items():
item_stencil = items_stencil.get(id, {})
self.load_entities(id, item_config, item_stencil, item_type, result)
for id,item_stencil in items_stencil.items():
# Check for any stencil item that is not part of the workflow yet.
# Add an entity for each match.
if id not in result:
self.load_entities(id, {}, item_stencil, item_type, result)
return result
def load_entities(self, id: str, item_config: dict, item_stencil: dict, item_type: ctor_type, result: dict):
entity_data = self.get_property(co.ATTR_ENTITY, item_config, item_stencil)
if isinstance(entity_data, Template):
self.load_entity(id, item_config, item_stencil, item_type, result, entity_data)
elif isinstance(entity_data, list):
is_multiple = len(entity_data) > 1
for i,entity in enumerate(entity_data):
item_id = "{}_{}".format(id, i) if is_multiple else id
self.load_entity(item_id, item_config, item_stencil, item_type, result, entity)
elif entity_data is None:
self.load_entity(id, item_config, item_stencil, item_type, result, None)
def load_entity(self, item_id: str, item_config: dict, item_stencil: dict, item_type: ctor_type, result: dict, entity: Union[str, Template]):
item: Ctor = item_type(self.context, item_id, entity)
item.load(item_config, item_stencil)
result[item.id] = item
def on_update(self, callable: callable_type) -> None:
self.context.on_update(callable)
def as_dict(self) -> dict:
result = {
a: getattr(self, a)
for a in [co.ATTR_ID, co.ATTR_STENCIL, co.ATTR_FRIENDLY_NAME]
if getattr(self, a) is not None
}
result[co.ATTR_ACTOR] = {}
result[co.ATTR_REACTOR] = {}
for id,actor in self.actors.items():
result[co.ATTR_ACTOR][id] = actor.as_dict()
for id,reactor in self.reactors.items():
result[co.ATTR_REACTOR][id] = reactor.as_dict()
return result
@callback
def async_unload(self) -> None:
self.context.unload()
class ConfigManager:
def __init__(self, hass: HomeAssistant):
self.hass = hass
self.workflows = None
def load(self, config: ConfigType) -> None:
co.LOGGER.info("Config", "loading react configuration")
self.domain_config = config.get(co.DOMAIN, {})
if self.domain_config:
co.LOGGER.info("Config", "found react configuration, processing")
self.stencil_config = self.domain_config.get(co.CONF_STENCIL, {})
self.workflow_config = self.domain_config.get(co.CONF_WORKFLOW, {})
self.parse_workflow_config(self.hass)
else:
self.workflows: dict[str, Workflow] = {}
co.LOGGER.info("Config", "no react configuration found")
def unload(self):
co.LOGGER.info("Config", "unloading react configuration")
if self.workflows:
for workflow in self.workflows.values():
workflow.async_unload()
self.workflows = None
self.workflow_config = None
self.stencil_config = None
def reload(self, config: ConfigType):
self.unload()
self.load(config)
def parse_workflow_config(self, hass: HomeAssistant):
co.LOGGER.info("Config", "loading react workflows")
self.workflows: dict[str, Workflow] = {}
for id, config in self.workflow_config.items():
co.LOGGER.info("Config", "'{}' processing workflow", id)
if not config:
config = {}
context = WorkflowContext(hass, id)
context.load_variables(config.get(co.ATTR_VARIABLES, {}))
workflow = Workflow(context, config)
stencil = self.get_stencil_by_name(workflow.stencil)
workflow.load(config, stencil)
self.workflows[id] = workflow
def get_stencil_by_name(self, stencil_name) -> dict:
result = {}
if stencil_name:
stencil = self.stencil_config.get(stencil_name, None)
if stencil:
result = stencil
else:
co.LOGGER.error("Config", "Stencil not found: '{}'".format(stencil_name))
return result
def get_workflow_metadata(self, reaction: ReactionEntry) -> Tuple[Workflow, Actor, Reactor]:
workflow = self.workflows.get(reaction.workflow_id, None)
if workflow is None:
co.LOGGER.warn("Config: workflow that created reaction not found: '{}'".format(reaction.id))
return None, None, None
actor = workflow.actors.get(reaction.actor_id, None)
if actor is None:
co.LOGGER.warn("Config: actor in workflow that created reaction not found: '{}'.'{}'".format(workflow.id, reaction.id))
return None, None, None
reactor = workflow.reactors.get(reaction.reactor_id, None)
if reactor is None:
co.LOGGER.warn("Config: reactor in workflow that created reaction not found: '{}'.'{}'".format(workflow.id, reaction.id))
return None, None, None
return workflow, actor, reactor
def get(hass: HomeAssistant) -> ConfigManager:
if co.DOMAIN_BOOTSTRAPPER in hass.data:
return hass.data[co.DOMAIN_BOOTSTRAPPER].config_manager
return None
|
#! /usr/bin/env python
from __future__ import print_function
import sys
from Bio import SeqIO
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='convert embl to genbank.')
parser.add_argument('infile', nargs='?', type=argparse.FileType('rU'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
for s in SeqIO.parse(args.infile, 'embl'):
_ = SeqIO.write(s, args.outfile, 'genbank')
|
import os
from typing import Tuple
import cyvcf2
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from snpdb.models import ImportSource, Sequence, md5sum_str
from upload.models import UploadedFile, UploadPipeline, UploadedVCF, UploadStep, UploadedFileTypes
from upload.vcf.bulk_genotype_vcf_processor import BulkGenotypeVCFProcessor
from upload.vcf.bulk_no_genotype_vcf_processor import BulkNoGenotypeVCFProcessor
from upload.vcf.sql_copy_files import COHORT_GENOTYPE_HEADER
from upload.vcf.vcf_import import create_vcf_from_vcf
class TestVCFProcessors(TestCase):
TEST_DATA_DIR = os.path.join(settings.BASE_DIR, "upload", "test_data", "vcf")
@classmethod
def setUpClass(cls):
super().setUpClass()
for base in "GATC":
Sequence.objects.get_or_create(seq=base, seq_md5_hash=md5sum_str(base), length=len(base))
@classmethod
def _create_fake_upload_step_and_vcf(cls, vcf_filename, vcf_reader) -> Tuple[UploadStep, UploadedVCF]:
user = User.objects.get_or_create(username='testuser')[0]
uploaded_file = UploadedFile.objects.create(path=vcf_filename,
import_source=ImportSource.COMMAND_LINE,
user=user,
file_type=UploadedFileTypes.VCF,
visible=False)
upload_pipeline = UploadPipeline.objects.create(uploaded_file=uploaded_file)
upload_step = UploadStep.objects.create(upload_pipeline=upload_pipeline,
input_filename=vcf_filename,
sort_order=0)
create_vcf_from_vcf(upload_step, vcf_reader)
uploaded_vcf = UploadedVCF.objects.get(upload_pipeline=upload_pipeline)
return upload_step, uploaded_vcf
def _test_genotype_processor(self, vcf_filename, processor_klass):
""" I keep forgetting to adjust the columns to match the CSV """
fast_vcf_reader = cyvcf2.VCF(vcf_filename)
upload_step, uploaded_vcf = self._create_fake_upload_step_and_vcf(vcf_filename, fast_vcf_reader)
processor = processor_klass(upload_step, None, uploaded_vcf, None)
for v in fast_vcf_reader:
processor.process_entry(v)
break
cg = None
for field_name in ["locus_cohort_genotypes", "cohort_genotypes"]:
if f := getattr(processor, field_name, None):
cg = f[0]
break
if cg is None:
raise ValueError("Couldn't find array to retrieve cohort genotype")
len_genotype_cols = len(cg)
len_columns = len(COHORT_GENOTYPE_HEADER) - BulkGenotypeVCFProcessor.COHORT_GT_NUM_ADDED_FIELDS
message = f"{processor_klass} CohortGenotypeData ({len_genotype_cols} cols) != CSV columns ({len_columns})"
self.assertEqual(len_genotype_cols, len_columns, message)
def test_no_genotype_processor(self):
vcf_filename = os.path.join(self.TEST_DATA_DIR, "no_genotype.GRCh37.vcf")
self._test_genotype_processor(vcf_filename, BulkNoGenotypeVCFProcessor)
def test_genotype_processor(self):
vcf_filename = os.path.join(self.TEST_DATA_DIR, "sample1_hg19.vcf")
self._test_genotype_processor(vcf_filename, BulkGenotypeVCFProcessor)
|
#!/usr/bin/python3
from telnetlib import Telnet
from re import findall
from time import sleep, time
from os import environ
from influxdb import InfluxDBClient
ROUTER = environ["ROUTER_IP_ADDRESS"]
USER = environ["ROUTER_LOGIN_USER"]
PASSWORD = environ["ROUTER_LOGIN_PASSWORD"]
PROMPT = environ["ROUTER_PROMPT"]
DB_NAME = environ["INFLUX_DB_NAME"]
DB_CONTAINER = environ["INFLUX_DB_ADDRESS"]
DB_PORT = environ["INFLUX_DB_PORT"]
DB_USER = environ["INFLUX_DB_USER"]
DB_PASSWORD = environ["INFLUX_DB_PASSWORD"]
MONITORING_INTERVAL = int(environ["MONITORING_INTERVAL"])
BANDWIDTH_SAMPLING_INTERVAL = 1  # fixed at 1 second so octet deltas map directly to per-second rates; could also be read from environ["BANDWIDTH_SAMPLING_INTERVAL"]
"""
firmware version
RTX1200 Rev.10.01.65 (Tue Oct 13 12:23:48 2015)
"""
class RTXTelnet:
"""
RTXTelnet: Telnet wrapper for RTX series
"""
log = ""
def __init__(self, router, username, password, port=23, prompt="", timeout=5, wait=0.5):
self.router = router
self.username = username
self.password = password
self.port = port
self.prompt = "\r\n" + prompt + "> "
self.timeout = timeout
self.wait = wait
self.connect()
def connect(self):
"""
connect(self): Connect to RTX via telnet
"""
        # open the telnet session
        self.telnet = Telnet(self.router, port=self.port, timeout=self.timeout)
        self.telnet.read_very_eager()  # flush anything already waiting in the buffer
        self.telnet.write(b"\n")  # send a newline so the router presents a prompt instead of an anonymous login
        self.telnet.read_very_eager()  # discard the prompt output
        # log in with username and password
while True:
pro = self.telnet.read_very_eager().decode()
self.log += pro
if pro.endswith(self.prompt):
break
elif pro.endswith("\r\nUsername: "):
self.telnet.write(self.username.encode("ascii") + b"\n")
elif pro.endswith("\r\nPassword: "):
self.telnet.write(self.password.encode("ascii") + b"\n")
sleep(self.wait)
        # switch the console character set to ASCII
self.telnet.write(b"console character ascii\n\n")
sleep(self.wait * 2)
while True:
pro = self.telnet.read_very_eager().decode()
self.log += pro
if pro.endswith(self.prompt):
break
def disconnect(self):
"""
disconnect(self): Disconnect from RTX
"""
self.telnet.write(b"exit\n")
self.log += self.telnet.read_very_eager().decode()
self.telnet.close()
def execute(self, cmd):
"""
execute(self, cmd): Execute command in RTX
"""
        # make sure a prompt is ready
while True:
pro = self.telnet.read_very_eager().decode()
self.log += pro
if pro.endswith(self.prompt):
break
if not pro:
self.telnet.write(b"\n")
sleep(self.wait)
        # run the command
self.telnet.write(cmd.encode("ascii") + b"\n")
sleep(self.wait * 2)
res = ""
while True:
res += self.telnet.read_very_eager().decode()
if res.endswith(self.prompt):
self.log += res
res = res.replace(cmd + " \x08 \x08\r\n", "").replace("---more---\r \r", "").replace(self.prompt, "")
break
elif res.endswith("---more---"):
self.telnet.write(b" ")
sleep(self.wait)
return res
def post_influxdb(dbconn, measurement, field, value):
request = [
{
"measurement": measurement,
"fields": {
field: value,
}
}
]
print(request)
dbconn.write_points(request)
return True
def grep(pattern, text):
return findall(pattern, text)
def lan_interfaces():
    # the RTX1200 has interfaces LAN1, LAN2 and LAN3
return ["1", "2", "3"]
def pp_interfaces(config):
t = []
for w in grep(r"pp select (\d+)", config):
t.append(w)
return sorted(set(t), key=t.index)
def dhcp_scopes(config):
t = []
for w in grep(r"dhcp scope (\d+)", config):
t.append(w)
return sorted(set(t), key=t.index)
def lan_interface_speed(config, num):
    # default to 1000 Mbps when no explicit "speed lanN" setting is present in the config
    val = grep(r"speed lan"+num+r" (\d+\w?)", config)
    if not val:
        return unitstr2num("1000m")
    return unitstr2num(val[0])
def unitstr2num(text):
val = int(grep(r"(\d+)", text)[0])
unit = text[-1:].lower()
if unit == "k":
return int(val * 1000)
elif unit == "m":
return int(val * 1000 * 1000)
else:
return int(val)
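# Examples: unitstr2num("500") -> 500, unitstr2num("100k") -> 100000,
# unitstr2num("1000m") -> 1000000000 (used below as a bits-per-second bandwidth).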
def environment_mon():
status = TN.execute("show environment")
# uptime
uptime = grep(r"Elapsed time from boot: (\d+)days (\d+):(\d+):(\d+)", status)[0]
days = int(uptime[0])
hours = int(uptime[1])
minutes = int(uptime[2])
seconds = int(uptime[3])
uptime_sec = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60 + seconds
post_influxdb(DB, "uptime", "sec", uptime_sec)
# cpu
post_influxdb(DB, "cpu", "5sec", int(grep(r"(\d+)%\(5sec\)", status)[0]))
post_influxdb(DB, "cpu", "1min", int(grep(r"(\d+)%\(1min\)", status)[0]))
post_influxdb(DB, "cpu", "5min", int(grep(r"(\d+)%\(5min\)", status)[0]))
# memory
post_influxdb(DB, "memory", "now", int(grep(r"Memory: (\d+)%", status)[0]))
# packet buffer
post_influxdb(DB, "packet_buffer", "small", int(grep(r"(\d+)%\(small\)", status)[0]))
post_influxdb(DB, "packet_buffer", "middle", int(grep(r"(\d+)%\(middle\)", status)[0]))
post_influxdb(DB, "packet_buffer", "large", int(grep(r"(\d+)%\(large\)", status)[0]))
post_influxdb(DB, "packet_buffer", "huge", int(grep(r"(\d+)%\(huge\)", status)[0]))
# temperature
post_influxdb(DB, "temperature", "now", int(grep(r"Inside Temperature\(C.\): (\d+)", status)[0]))
def nat_mon():
status = TN.execute("show nat descriptor address")
if grep(r"(\d+) used.", status):
value = int(grep(r"(\d+) used.", status)[0])
else:
value = -1
post_influxdb(DB, "nat", "entry", value)
def dhcp_mon(config):
for i in dhcp_scopes(config):
status = TN.execute("show status dhcp "+i)
post_influxdb(DB, "dhcp"+i, "leased", int(grep(r"Leased: (\d+)", status)[0]))
post_influxdb(DB, "dhcp"+i, "usable", int(grep(r"Usable: (\d+)", status)[0]))
def pp_traffic_mon(config, sec):
for i in pp_interfaces(config):
start_time = time()
status1 = TN.execute("show status pp "+i)
sleep(sec)
status2 = TN.execute("show status pp "+i)
running_time = time() - start_time
if "Connected" in status1:
rcv1 = int(grep(r"\[(\d+) octets?\]", status1)[0])
snd1 = int(grep(r"\[(\d+) octets?\]", status1)[1])
rcv2 = int(grep(r"\[(\d+) octets?\]", status2)[0])
snd2 = int(grep(r"\[(\d+) octets?\]", status2)[1])
post_influxdb(DB, "pp"+i, "receive", (rcv2 - rcv1) / running_time)
post_influxdb(DB, "pp"+i, "transmit", (snd2 - snd1) / running_time)
rcv_load = int(grep(r"Load:\s+(\d+).(\d+)%", status1)[0][0]) + int(grep(r"Load:\s+(\d+).(\d+)%", status1)[0][1]) / 10
snd_load = int(grep(r"Load:\s+(\d+).(\d+)%", status1)[1][0]) + int(grep(r"Load:\s+(\d+).(\d+)%", status1)[1][1]) / 10
post_influxdb(DB, "pp"+i, "receive_load", rcv_load)
post_influxdb(DB, "pp"+i, "transmit_load", snd_load)
def lan_traffic_mon(config, sec):
for i in lan_interfaces():
start_time = time()
status1 = TN.execute("show status lan"+i)
sleep(sec)
status2 = TN.execute("show status lan"+i)
running_time = time() - start_time
bandwidth = lan_interface_speed(config, i)
snd1 = int(grep(r"\((\d+) octets?\)", status1)[0])
rcv1 = int(grep(r"\((\d+) octets?\)", status1)[1])
snd2 = int(grep(r"\((\d+) octets?\)", status2)[0])
rcv2 = int(grep(r"\((\d+) octets?\)", status2)[1])
post_influxdb(DB, "lan"+i, "receive", (rcv2 - rcv1) / running_time)
post_influxdb(DB, "lan"+i, "transmit", (snd2 - snd1) / running_time)
post_influxdb(DB, "lan"+i, "receive_load", ((rcv2 - rcv1) * 8 / running_time) / bandwidth)
post_influxdb(DB, "lan"+i, "transmit_load", ((snd2 - snd1) * 8 / running_time) / bandwidth)
def main():
"""
Main
"""
while True:
try:
config = TN.execute("show config")
environment_mon()
nat_mon()
dhcp_mon(config)
pp_traffic_mon(config, BANDWIDTH_SAMPLING_INTERVAL)
lan_traffic_mon(config, BANDWIDTH_SAMPLING_INTERVAL)
        except Exception as exc:
            print("failed to post:", exc)
sleep(MONITORING_INTERVAL)
if __name__ == '__main__':
TN = RTXTelnet(ROUTER, USER, PASSWORD, prompt=PROMPT, timeout=3, wait=0.2)
DB = InfluxDBClient(DB_CONTAINER, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME)
main()
|
"""
==================
Errorbar Subsample
==================
Demo for the errorevery keyword, which lets a plot show every data point at
full accuracy while drawing only a subset of the error bars.
"""
import numpy as np
import matplotlib.pyplot as plt
# example data
x = np.arange(0.1, 4, 0.1)
y1 = np.exp(-1.0 * x)
y2 = np.exp(-0.5 * x)
# example variable error bar values
y1err = 0.1 + 0.1 * np.sqrt(x)
y2err = 0.1 + 0.1 * np.sqrt(x/2)
# Now switch to a more OO interface to exercise more features.
fig, (ax_l, ax_c, ax_r) = plt.subplots(nrows=1, ncols=3,
sharex=True, figsize=(12, 6))
ax_l.set_title('all errorbars')
ax_l.errorbar(x, y1, yerr=y1err)
ax_l.errorbar(x, y2, yerr=y2err)
ax_c.set_title('only every 6th errorbar')
ax_c.errorbar(x, y1, yerr=y1err, errorevery=6)
ax_c.errorbar(x, y2, yerr=y2err, errorevery=6)
ax_r.set_title('second series shifted by 3')
ax_r.errorbar(x, y1, yerr=y1err, errorevery=(0, 6))
ax_r.errorbar(x, y2, yerr=y2err, errorevery=(3, 6))
fig.suptitle('Errorbar subsampling for better appearance')
plt.show()
|
import math
import numpy as np
from typing import Any, List
from util import common_values
from util.game_state import GameState
from util.physics_object import PhysicsObject
from util.player_data import PlayerData
class CustomObs:
POS_STD = 2300
ANG_STD = math.pi
def __init__(self, cars):
super().__init__()
self.obs_size = 9 + 8 + 25 + 31 * (cars - 1) + 34
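        # Size breakdown (the 8 previous-action values and 34 boost pads are assumed
        # RLGym defaults): 9 = ball pos/vel/ang-vel, 25 = this player's block from
        # _add_player_to_obs, 31 = that same block plus relative pos/vel per other car.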
def reset(self, initial_state: GameState):
pass
def build_obs(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> Any:
if player.team_num == common_values.ORANGE_TEAM:
inverted = True
ball = state.inverted_ball
pads = state.inverted_boost_pads
else:
inverted = False
ball = state.ball
pads = state.boost_pads
obs = [ball.position / CustomObs.POS_STD,
ball.linear_velocity / CustomObs.POS_STD,
ball.angular_velocity / CustomObs.ANG_STD,
previous_action,
pads]
player_car = self._add_player_to_obs(obs, player, ball, inverted)
allies = []
enemies = []
for other in state.players:
if other.car_id == player.car_id:
continue
if other.team_num == player.team_num:
team_obs = allies
else:
team_obs = enemies
other_car = self._add_player_to_obs(team_obs, other, ball, inverted)
# Extra info
team_obs.extend([
(other_car.position - player_car.position) / CustomObs.POS_STD,
(other_car.linear_velocity - player_car.linear_velocity) / CustomObs.POS_STD
])
obs.extend(allies)
obs.extend(enemies)
return np.concatenate(obs)
def _add_player_to_obs(self, obs: List, player: PlayerData, ball: PhysicsObject, inverted: bool):
if inverted:
player_car = player.inverted_car_data
else:
player_car = player.car_data
rel_pos = ball.position - player_car.position
rel_vel = ball.linear_velocity - player_car.linear_velocity
obs.extend([
rel_pos / CustomObs.POS_STD,
rel_vel / CustomObs.POS_STD,
player_car.position / CustomObs.POS_STD,
player_car.forward(),
player_car.up(),
player_car.linear_velocity / CustomObs.POS_STD,
player_car.angular_velocity / CustomObs.ANG_STD,
[player.boost_amount,
int(player.on_ground),
int(player.has_flip),
int(player.is_demoed)]])
return player_car
|
import os
import unittest
import doto
d0 = doto.connect_d0()
class TestConfig(unittest.TestCase):
def test_get_all_droplets(self):
self.assertEqual(d0.get_all_droplets(status_check=True),200)
self.assertEqual(d0.get_sizes(status_check=True),200)
self.assertEqual(d0.get_images(status_check=True),200)
self.assertEqual(d0.get_domains(status_check=True),200)
self.assertEqual(d0.get_regions(status_check=True),200)
self.assertEqual(d0.get_ssh_keys(status_check=True),200)
if __name__ == '__main__':
unittest.main()
# Example of powering off a droplet and waiting for the event to complete
# (left commented out because no `droplet` object is defined in this module):
# droplet.power_off()
# while droplet.event_status != 'done':
#     droplet.event_update()
from collections import OrderedDict
from ..util import create_element
from .common import EWSAccountService, create_folder_ids_element
class EmptyFolder(EWSAccountService):
"""
MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/emptyfolder
"""
SERVICE_NAME = 'EmptyFolder'
element_container_name = None # EmptyFolder doesn't return a response object, just status in XML attrs
def call(self, folders, delete_type, delete_sub_folders):
return self._get_elements(payload=self.get_payload(folders=folders, delete_type=delete_type,
delete_sub_folders=delete_sub_folders))
def get_payload(self, folders, delete_type, delete_sub_folders):
emptyfolder = create_element(
'm:%s' % self.SERVICE_NAME,
attrs=OrderedDict([
('DeleteType', delete_type),
('DeleteSubFolders', 'true' if delete_sub_folders else 'false'),
])
)
folder_ids = create_folder_ids_element(tag='m:FolderIds', folders=folders, version=self.account.version)
emptyfolder.append(folder_ids)
return emptyfolder
|
import re
from nonebot.typing import T_State
from nonebot.permission import SUPERUSER
from nonebot.adapters.onebot.v11 import Bot, MessageEvent, GroupMessageEvent
from nonebot_plugin_guild_patch import GuildMessageEvent
from nonebot.adapters.onebot.v11.permission import GROUP_OWNER, GROUP_ADMIN
from .data_source import Manage
block_user = Manage().on_command("封禁用户", "对目标用户进行封禁", permission=SUPERUSER)
@block_user.handle()
async def _ready_block_user(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["block_user"] = msg
@block_user.got("block_user", "哪位?GKD!")
async def _deal_block_user(bot: Bot, event: MessageEvent, state: T_State):
user_id = f'{state["block_user"]}'
quit_list = ["算了", "罢了"]
if user_id in quit_list:
await block_user.finish("...看来有人逃过一劫呢")
is_ok = Manage().block_user(user_id)
if not is_ok:
await block_user.finish("kuso!封禁失败了...")
await block_user.finish(f"用户 {user_id} 危!")
unblock_user = Manage().on_command("解封用户", "对目标用户进行解封", permission=SUPERUSER)
@unblock_user.handle()
async def _ready_unblock_user(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["unblock_user"] = msg
@unblock_user.got("unblock_user", "哪位?GKD!")
async def _deal_unblock_user(bot: Bot, event: MessageEvent, state: T_State):
user_id = f'{state["unblock_user"]}'
quit_list = ["算了", "罢了"]
if user_id in quit_list:
await unblock_user.finish("...有人又得继续在小黑屋呆一阵子了")
is_ok = Manage().unblock_user(user_id)
if not is_ok:
await unblock_user.finish("kuso!解封失败了...")
await unblock_user.finish(f"好欸!{user_id} 重获新生!")
block_group = Manage().on_command("封禁群", "对目标群进行封禁", permission=SUPERUSER)
@block_group.handle()
async def _ready_block_group(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["block_group"] = msg
@block_group.got("block_group", "哪个群?GKD!")
async def _deal_block_group(bot: Bot, event: MessageEvent, state: T_State):
    group_id = f'{state["block_group"]}'
quit_list = ["算了", "罢了"]
if group_id in quit_list:
await block_group.finish("...看来有一群逃过一劫呢")
is_ok = Manage().block_group(group_id)
if not is_ok:
await block_group.finish("kuso!封禁失败了...")
await block_group.finish(f"群 {group_id} 危!")
unblock_group = Manage().on_command("解封群", "对目标群进行解封", permission=SUPERUSER)
@unblock_group.handle()
async def _ready_unblock_group(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["unblock_group"] = msg
@unblock_group.got("unblock_group", "哪个群?GKD!")
async def _deal_unblock_group(bot: Bot, event: MessageEvent, state: T_State):
group_id = f'{state["unblock_group"]}'
quit_list = ["算了", "罢了"]
if group_id in quit_list:
await unblock_group.finish("...有一群又得继续在小黑屋呆一阵子了")
is_ok = Manage().unblock_group(group_id)
if not is_ok:
await unblock_group.finish("kuso!解封失败了...")
await unblock_group.finish(f"好欸!群 {group_id} 重获新生!")
global_block_service = Manage().on_command("全局禁用", "全局禁用某服务", permission=SUPERUSER)
@global_block_service.handle()
async def _ready_block_service(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["global_block_service"] = msg
@global_block_service.got("global_block_service", "阿...是哪个服务呢")
async def _deal_global_block_service(bot: Bot, event: MessageEvent, state: T_State):
block_service = state["global_block_service"]
quit_list = ["算了", "罢了"]
if block_service in quit_list:
await global_block_service.finish("好吧...")
is_ok = Manage().control_global_service(block_service, False)
if not is_ok:
await global_block_service.finish("kuso!禁用失败了...")
await global_block_service.finish(f"服务 {block_service} 已被禁用")
global_unblock_service = Manage().on_command("全局启用", "全局启用某服务", permission=SUPERUSER)
@global_unblock_service.handle()
async def _ready_unblock_service(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["global_unblock_service"] = msg
@global_unblock_service.got("global_unblock_service", "阿...是哪个服务呢")
async def _deal_global_unblock_service(bot: Bot, event: MessageEvent, state: T_State):
unblock_service = state["global_unblock_service"]
quit_list = ["算了", "罢了"]
if unblock_service in quit_list:
await global_unblock_service.finish("好吧...")
is_ok = Manage().control_global_service(unblock_service, True)
if not is_ok:
await global_unblock_service.finish("kuso!启用服务失败了...")
await global_unblock_service.finish(f"服务 {unblock_service} 已启用")
user_block_service = Manage().on_regex(
r"对用户(.*?)禁用(.*)", "针对某一用户禁用服务", permission=SUPERUSER
)
@user_block_service.handle()
async def _user_block_service(bot: Bot, event: MessageEvent):
msg = str(event.message).strip()
pattern = r"对用户(.*?)禁用(.*)"
reg = re.findall(pattern, msg)
    aim_user, aim_service = reg[0]
is_ok = Manage().control_user_service(aim_service, aim_user, False)
if not is_ok:
await user_block_service.finish("禁用失败...请检查服务名是否正确")
await user_block_service.finish(f"完成~已禁止用户 {aim_user} 使用 {aim_service}")
user_unblock_service = Manage().on_regex(
r"对用户(.*?)启用(.*)", "针对某一用户启用服务", permission=SUPERUSER
)
@user_unblock_service.handle()
async def _user_unblock_service(bot: Bot, event: MessageEvent):
msg = str(event.message).strip()
pattern = r"对用户(.*?)启用(.*)"
reg = re.findall(pattern, msg)
    aim_user, aim_service = reg[0]
is_ok = Manage().control_user_service(aim_service, aim_user, True)
if not is_ok:
await user_unblock_service.finish("启用失败...请检查服务名是否正确,或者此人并不存在于名单中")
await user_unblock_service.finish(f"完成~已允许用户 {aim_user} 使用 {aim_service}")
group_block_service = Manage().on_command("禁用", "针对所在群禁用某服务", permission=SUPERUSER)
@group_block_service.handle()
async def _ready_group_block_service(
bot: Bot, event: MessageEvent, state: T_State
):
msg = str(event.message).strip()
if msg:
state["group_block_service"] = msg
@group_block_service.got("group_block_service", "阿...是哪个服务呢")
async def _deal_group_block_service(bot: Bot, event: MessageEvent, state: T_State):
aim_service = state["group_block_service"]
if type(event) is GroupMessageEvent:
group_id = f"{event.group_id}"
nick = '本群'
elif type(event) is GuildMessageEvent:
group_id = f'''{event.guild_id}_{event.channel_id}'''
nick = '本频道'
quit_list = ["算了", "罢了"]
if aim_service in quit_list:
await group_block_service.finish("好吧...")
is_ok = Manage().control_group_service(aim_service, group_id, False)
if not is_ok:
await group_block_service.finish("禁用失败...请检查服务名是否输入正确")
await group_block_service.finish(f"完成!~已禁止{nick}使用服务:{aim_service}")
group_unblock_service = Manage().on_command(
"启用", "针对所在群启用某服务", permission=SUPERUSER | GROUP_OWNER | GROUP_ADMIN
)
@group_unblock_service.handle()
async def _ready_group_unblock_service(
bot: Bot, event: MessageEvent, state: T_State
):
msg = str(event.message).strip()
if msg:
state["group_unblock_service"] = msg
@group_unblock_service.got("group_unblock_service", "阿...是哪个服务呢")
async def _deal_group_unblock_service(
bot: Bot, event: MessageEvent, state: T_State
):
aim_service = state["group_unblock_service"]
if type(event) is GroupMessageEvent:
group_id = f"{event.group_id}"
nick = '本群'
elif type(event) is GuildMessageEvent:
group_id = f'''{event.guild_id}_{event.channel_id}'''
nick = '本频道'
quit_list = ["算了", "罢了"]
if aim_service in quit_list:
await group_unblock_service.finish("好吧...")
is_ok = Manage().control_group_service(aim_service, group_id, True)
if not is_ok:
await group_unblock_service.finish("启用失败...请检查服务名是否输入正确,或群不存在于名单中")
await group_unblock_service.finish(f"完成!~已允许{nick}使用服务:{aim_service}")
get_friend_add_list = Manage().on_command("获取好友申请", "获取好友申请列表", permission=SUPERUSER)
@get_friend_add_list.handle()
async def _get_friend_add_list(bot: Bot, event: MessageEvent):
data = Manage().load_friend_apply_list()
temp_list = list()
for i in data:
apply_code = i
apply_user = data[i]["user_id"]
apply_comment = data[i]["comment"]
temp_msg = f"{apply_user} | {apply_comment} | {apply_code}"
temp_list.append(temp_msg)
msg0 = "申请人ID | 申请信息 | 申请码\n" + "\n".join(map(str, temp_list))
msg1 = msg0 + "\nTip: 使用 同意/拒绝好友 [申请码] 以决定"
await get_friend_add_list.finish(msg1)
approve_friend_add = Manage().on_command("同意好友", "同意好友申请", permission=SUPERUSER)
@approve_friend_add.handle()
async def _ready_approve_friend_add(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["approve_friend_add"] = msg
@approve_friend_add.got("approve_friend_add", "申请码GKD!")
async def _deal_approve_friend_add(bot: Bot, event: MessageEvent, state: T_State):
apply_code = state["approve_friend_add"]
quit_list = ["算了", "罢了"]
if apply_code in quit_list:
await approve_friend_add.finish("好吧...")
try:
await bot.set_friend_add_request(flag=apply_code, approve=True)
except BaseException:
await approve_friend_add.finish("同意失败...尝试下手动?")
data = Manage().load_friend_apply_list()
data.pop(apply_code)
Manage().save_friend_apply_list(data)
await approve_friend_add.finish("好欸!申请已通过!")
refuse_friend_add = Manage().on_command("拒绝好友", "拒绝好友申请", permission=SUPERUSER)
@refuse_friend_add.handle()
async def _ready_refuse_friend_add(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["refuse_friend_add"] = msg
@refuse_friend_add.got("refuse_friend_add", "申请码GKD!")
async def _deal_refuse_friend_add(bot: Bot, event: MessageEvent, state: T_State):
apply_code = state["refuse_friend_add"]
quit_list = ["算了", "罢了"]
if apply_code in quit_list:
await refuse_friend_add.finish("好吧...")
try:
await bot.set_friend_add_request(flag=apply_code, approve=False)
except BaseException:
await refuse_friend_add.finish("拒绝失败...尝试下手动?")
data = Manage().load_friend_apply_list()
data.pop(apply_code)
Manage().save_friend_apply_list(data)
await refuse_friend_add.finish("已拒绝!")
get_group_invite_list = Manage().on_command("获取邀请列表", "获取群邀请列表", permission=SUPERUSER)
@get_group_invite_list.handle()
async def _get_group_invite_list(bot: Bot, event: MessageEvent):
data = Manage().load_invite_apply_list()
temp_list = list()
for i in data:
apply_code = i
apply_user = data[i]["user_id"]
apply_comment = data[i]["comment"]
temp_msg = f"{apply_user} | {apply_comment} | {apply_code}"
temp_list.append(temp_msg)
msg0 = "申请人ID | 申请信息 | 申请码\n" + "\n".join(map(str, temp_list))
msg1 = msg0 + "\nTip: 使用 同意/拒绝邀请 [申请码] 以决定"
    await get_group_invite_list.finish(msg1)
approve_group_invite = Manage().on_command("同意邀请", "同意群聊邀请", permission=SUPERUSER)
@approve_group_invite.handle()
async def _ready_approve_group_invite(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["approve_group_invite"] = msg
@approve_group_invite.got("approve_group_invite", "申请码GKD!")
async def _deal_approve_group_invite(bot: Bot, event: MessageEvent, state: T_State):
apply_code = state["approve_group_invite"]
quit_list = ["算了", "罢了"]
if apply_code in quit_list:
await approve_group_invite.finish("好吧...")
try:
await bot.set_group_add_request(
flag=apply_code, sub_type="invite", approve=True
)
except BaseException:
await approve_group_invite.finish("同意失败...尝试下手动?")
data = Manage().load_invite_apply_list()
data.pop(apply_code)
Manage().save_invite_apply_list(data)
await approve_group_invite.finish("好欸!申请已通过!")
refuse_group_invite = Manage().on_command("拒绝邀请", "拒绝群聊邀请", permission=SUPERUSER)
@refuse_group_invite.handle()
async def _ready_refuse_group_invite(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state["refuse_group_invite"] = msg
@refuse_group_invite.got("refuse_group_invite", "申请码GKD!")
async def _deal_refuse_group_invite(bot: Bot, event: MessageEvent, state: T_State):
apply_code = state["refuse_group_invite"]
quit_list = ["算了", "罢了"]
if apply_code in quit_list:
await refuse_group_invite.finish("好吧...")
try:
await bot.set_group_add_request(
flag=apply_code, sub_type="invite", approve=False
)
except BaseException:
await refuse_group_invite.finish("拒绝失败...尝试下手动?")
data = Manage().load_invite_apply_list()
data.pop(apply_code)
Manage().save_invite_apply_list(data)
await refuse_group_invite.finish("已拒绝!")
# track_error = Manage().on_command("追踪", "获取报错信息,传入追踪码", aliases={"/track"})
# @track_error.handle()
# async def _track_error(bot: Bot, event: MessageEvent):
# track_id = str(event.message).strip()
# repo = await Manage().track_error(track_id)
# await track_error.finish(repo)
|
import torch
import dsntnn
def gaze_loss(targets, masks, targets_start_from, masks_start_from, coords, heatmaps, probabilities, slice_ind,
interpolate_coordinates):
gaze_targets = targets[targets_start_from:targets_start_from + 16*interpolate_coordinates, :].transpose(1, 0).reshape(-1, 8*interpolate_coordinates, 1, 2)
gaze_masks = masks[:, masks_start_from:masks_start_from+8*interpolate_coordinates].squeeze()
gaze_coords = coords[:, :, slice_ind, :]
gaze_coords.unsqueeze_(2)
gaze_heatmaps = heatmaps[:, :, slice_ind, :]
gaze_heatmaps.unsqueeze_(2)
gaze_coord_loss = calc_coord_loss(gaze_coords, gaze_heatmaps, gaze_targets, gaze_masks)
return gaze_coord_loss
def hand_loss(targets, masks, targets_start_from, masks_start_from, coords, heatmaps, probabilities, slice_from, interpolate_coordinates):
hand_targets = targets[targets_start_from:targets_start_from + 32*interpolate_coordinates, :].transpose(1, 0).reshape(-1, 8*interpolate_coordinates, 2, 2)
hand_masks = masks[:, masks_start_from:masks_start_from+16*interpolate_coordinates].reshape(-1, 2, 8*interpolate_coordinates).transpose(1, 2).squeeze()
# hand_masks = masks[:, masks_start_from:masks_start_from+16].reshape(-1, 8, 2).squeeze()
# for hands slice the last two elements, first is left, second is right hand
hand_coords = coords[:, :, slice_from:slice_from + 2, :]
hand_heatmaps = heatmaps[:, :, slice_from:slice_from + 2, :]
hand_coord_loss = calc_coord_loss(hand_coords, hand_heatmaps, hand_targets, hand_masks)
return hand_coord_loss
def calc_coord_loss(coords, heatmaps, target_var, masks):
# Per-location euclidean losses
euc_losses = dsntnn.euclidean_losses(coords, target_var) # shape:[B, D, L, 2] batch, depth, locations, feature
# Per-location regularization losses
reg_losses = []
for i in range(heatmaps.shape[1]):
hms = heatmaps[:, i]
target = target_var[:, i]
reg_loss = dsntnn.js_reg_losses(hms, target, sigma_t=1.0)
reg_losses.append(reg_loss)
reg_losses = torch.stack(reg_losses, 1)
# reg_losses = dsntnn.js_reg_losses(heatmaps, target_var, sigma_t=1.0) # shape: [B, D, L, 7, 7]
# Combine losses into an overall loss
coord_loss = dsntnn.average_loss((euc_losses + reg_losses).squeeze(), mask=masks)
return coord_loss
|
# Django modules.
from django.urls import reverse
from django.test import TestCase, Client
# !Triplinker modules:
# Another apps modules.
from accounts.models.TLAccount_frequest import TLAccount
from trip_places.models import Place
# Current app modules.
from .models import Journey, Participant
class TestJourneysViews(TestCase):
def setUp(self):
self.user_1 = TLAccount.objects.create_user(first_name='John',
second_name='Li',
email='[email protected]', sex='M',
date_of_birth='2000-10-12', country='BY',
password='secret')
self.client_user_1 = Client()
self.client_user_1.login(username='[email protected]', password='secret')
self.user_1_acc = TLAccount.objects.get(first_name='John')
self.kwargs_user_1 = {'user_id': self.user_1_acc.id}
# Creating Journey
self.place = Place.objects.create()
Participant.objects.create(participant=self.user_1)
        self.participant = Participant.objects.get(participant=self.user_1)
self.journey = Journey.objects.create(place_from=self.place,
place_to=self.place,
who_added_the_journey=self.user_1)
        self.participant.journey = self.journey
        self.participant.save()
self.new_journey = Journey.objects.get(place_from=self.place)
self.journey_kwargs = {'journey_id': self.journey.id}
def test_journey_page_view(self):
url = reverse('journeys:journey-page', kwargs=self.journey_kwargs)
response = self.client_user_1.get(url)
        self.assertEqual(response.status_code, 200)
def test_user_journey_list_view(self):
url = reverse('journeys:journey-list', kwargs=self.kwargs_user_1)
response = self.client_user_1.get(url)
        self.assertEqual(response.status_code, 200)
def test_sort_journeys_by_date_view(self):
url = reverse('journeys:sort-journeys-by-date',
kwargs=self.kwargs_user_1)
response = self.client_user_1.get(url)
        self.assertEqual(response.status_code, 200)
def test_sort_journeys_by_rating_of_place_view(self):
url = reverse('journeys:sort-journeys-by-rating-of-place',
kwargs=self.kwargs_user_1)
response = self.client_user_1.get(url)
        self.assertEqual(response.status_code, 200)
|
from dataclasses import dataclass, field
@dataclass
class DeliveryFailure():
pass
@dataclass
class SendFailure():
pass
def send(to=None, cc=None, bcc=None, from_address="no-reply", from_name=None, subject=None, text=None, html=None,
attachments=None, inline_attachments=None):
"""Send an email"""
pass
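# Illustrative call sketch (not part of the original stub): the address and
# content below are hypothetical and only exercise the documented keyword arguments.
if __name__ == '__main__':
    send(
        to=['[email protected]'],
        from_address='no-reply',
        from_name='Example Sender',
        subject='Hello',
        text='Plain-text body',
        html='<p>HTML body</p>',
    )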
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 06:34:16 2021
@author: ayman
"""
import torch
import torch.nn.functional as F
import numpy as np
class A3CGruAgent:
"""
Actor critic agent that returns action It applies softmax by default.
"""
def __init__(self, model, env, frames, episodes):
self.model = model
self.device = model.device
self.env = env
self.frames = frames
self.num_actions = env.action_space.n
self.episodes = episodes
self.values = []
self.log_probs = []
self.entropies = []
self.rewards = []
self.reset()
def step(self):
"""
Return actions and hidden states from given list of states.
:param states: list of states
:return: list of actions
"""
self.frames.value += 1
        logit, value, self.hx = self.model(self.state, self.hx)  # carry the recurrent hidden state between steps
log_prob = F.log_softmax(logit, dim=1)
prob = F.softmax(logit, dim=1)
entropy = -(prob * log_prob).sum(1)
action = self.select(prob)
log_prob = log_prob[range(1), action]
state, self.reward, self.done, _ = self.env.step(action)
self.state = self.preprocess(state).to(self.device)
self.reward = min(max(self.reward, -1), 1)
self.values.append(value)
self.entropies.append(entropy)
self.rewards.append(self.reward)
self.log_probs.append(log_prob)
if self.done:
self.episodes.value += 1
return False
return True
@torch.no_grad()
def play(self, verbose=False):
"""
Return actions and hidden states from given list of states.
:param states: list of states
:return: list of actions
"""
self.clear()
self.reset()
while True:
if verbose: self.env.render()
logit, _, _ = self.model(self.state)
prob = F.softmax(logit, dim=1)
action = self.select(prob)
state, self.reward, self.done, _ = self.env.step(action)
self.rewards.append(self.reward)
if self.done:
if verbose: print(self.get_total_rewards())
self.reset()
break
self.env.close()
def preprocess(self, states):
"""Return tensor -> (b,c,h,w).(device)."""
np_states = np.expand_dims(states, 0)
return torch.tensor(np_states)
def select(self, prob):
"""Select from a probability distribution."""
# return np.random.choice(self.num_actions, p=prob.data.cpu().numpy()[0])
return prob.multinomial(1).data.cpu().numpy()[0]
def get_total_rewards(self):
return sum(self.rewards)
def clear(self):
"""Use to clear all values. Use with reset if you want clean start."""
self.values.clear()
self.log_probs.clear()
self.entropies.clear()
self.rewards.clear()
if self.hx is not None:
self.hx = self.hx.detach()
def reset(self):
"""Reset the agent. Use when episode is done but want to continue training."""
self.hx = None
self.reward = 0
self.done = False
self.state = self.preprocess(self.env.reset()).to(self.device)
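# Hypothetical usage sketch (not in the original file): `model`, `env`, `frames`
# and `episodes` come from the surrounding A3C training code; the two counters are
# shared multiprocessing values exposing a `.value` attribute.
#
#   agent = A3CGruAgent(model, env, frames=frames, episodes=episodes)
#   while agent.step():        # step() returns False once the episode is done
#       pass
#   print(agent.get_total_rewards())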
|
from flask import Blueprint
bp = Blueprint("profiler", __name__)
from wlanpi_webui.profiler import profiler
|
def kth_element(arr, M, K):
"""
Given an array arr[] of size N and two integers M and K, the task is to find the array element at the Kth index after performing
following M operations on the given array.
In a single operation, a new array is formed whose elements have the Bitwise XOR values of the adjacent elements of the current array.
If the number of operations M or the value of K after M operations is invalid then print -1.
Examples:
Input: arr[] = {1, 4, 5, 6, 7}, M = 1, K = 2
Output: 3
Explanation:
Since M = 1, therefore, the operation has to be performed only once on the array.
The array is modified to {1^4, 4^5, 5^6, 6^7} = {5, 1, 3, 1}.
The value of the element at index K = 2 in the updated array is 3.
"""
    # Initializing array for storing resultant array
    temp = []
    if M < 0 or M >= len(arr):
        return -1
    for j in range(M):
        for i in range(len(arr) - 1):
            value = arr[i] ^ arr[i + 1]
            temp.append(value)
        if j < (M - 1):
            arr = temp
            temp = []
    # K must be a valid index into the final array (its length shrinks by one per operation)
    if K < 0 or K >= len(temp):
        return -1
    return temp[K]
#Given array
arr=[1,2,3,4,5,6]
M=2
K=3
#Function call
print(kth_element(arr,M,K)) |
# Extended example: slicing a Korean resident registration number string
str28 = "900123-1234567"
print(str28[7:8])  # first digit after the hyphen
print("홍길동은 "+str28[0:2]+"년 "+str28[2:4]+"월 "+str28[4:6]+"일 생이다")  # "Hong Gildong was born in year 90, month 01, day 23"
myCitizenCode = input("주민번호를 입력하세요: ")  # "Enter your resident registration number: "
print("\n당신은 "+myCitizenCode[0:2]+"년 "+myCitizenCode[2:4]+"월 "+myCitizenCode[4:6]+"일 생이다")  # "You were born in year .., month .., day .."
|
import json
import uuid
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
class VaultTest(APITestCase):
def test_generate_uuid(self):
"""
Tests that a new UUID is generated and returns previously
created generated UUIDs.
"""
url = reverse('uuid-generator')
data = {}
response = self.client.post(url, data, format='json')
response_data = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(len(response_data), 1)
response = self.client.post(url, data, format='json')
response = self.client.post(url, data, format='json')
response = self.client.post(url, data, format='json')
response_data = json.loads(response.content)
self.assertEqual(len(response_data), 4)
|
from pythonds.basic.deque import Deque
def palchecker(a_string):
chardeque = Deque()
for ch in a_string:
chardeque.addRear(ch)
stillEqual = True
while chardeque.size() > 1 and stillEqual:
first = chardeque.removeFront()
last = chardeque.removeRear()
if first != last:
stillEqual = False
return stillEqual
if __name__ == '__main__':
a = 'lsdkjfskf'
print(a, palchecker(a))
b = 'tommot'
print(b, palchecker(b))
"""
lsdkjfskf False
tommot True
"""
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""The command line interface for calling pyRVT.
See :doc:`usage` for more details.
"""
import argparse
from . import __version__
from .tools import operation_psa2fa, operation_fa2psa
from .motions import DEFAULT_CALC
parser = argparse.ArgumentParser(
prog='pyrvt',
description='Compute response or Fourier amplitude spectra using RVT.')
parser.add_argument(
'--version',
action='version',
version='%(prog)s version ' + str(__version__))
parser.add_argument(
'operation',
help='''Operation to be performed: [psa2fa] converts from
pseudo-spectral acceleration to Fourier amplitude, and [fa2psa] converts
from Fourier amplitude to pseudo-spectral acceleration.''',
choices=['psa2fa', 'fa2psa'])
parser.add_argument(
'-i',
'--input',
help='''Path containing the input file(s). Supported file types are
csv, xls, and xlsx -- provided the required packages have been
installed. A single file or glob can be specified. An example of a
glob would be "input/*_sa.xls" for all files within directory "input"
ending in "_sa.xls".''',
required=True)
parser.add_argument(
'-o',
'--output',
help='''Path where the output files should be created. If this
directory does not exist it will be created. Default: ./output''',
default='./output')
parser.add_argument(
'-d',
'--damping',
default=0.05,
type=float,
help='''Oscillator damping in decimal. Default: 0.05.''')
parser.add_argument(
'-f',
'--fixed-spacing',
action='store_true',
help='''Fixed spacing of the oscillator period of
0.01 to 10 sec log-spaced with 100 points. Target SA values will be
interpolated if needed''')
parser.add_argument(
'-m',
'--method',
default=DEFAULT_CALC,
choices=['BJ84', 'BT12', 'DK85', 'LP99', 'TM87', 'V75'],
help='''Specify the peak factor calculation method. Possible options
are: [BJ84] Boore and Joyner (1984), [BT12] Boore and Thompson (2012),
[DK85] Der Kiureghian (1985), [LP99] Liu and Pezeshk (1999), [TM87] Toro
and McGuire (1987), and [V75] Vanmarcke (1975). If the BT12 method is used,
then the magnitude, distance and region must be provided by the input
files. If no value is provided, then '%(default)s' is used as the
default.''')
def main():
"""Perform the command line operations."""
args = parser.parse_args()
if args.operation == 'psa2fa':
operation_psa2fa(args.input, args.output, args.damping, args.method,
args.fixed_spacing)
elif args.operation == 'fa2psa':
operation_fa2psa(args.input, args.output, args.damping, args.method,
args.fixed_spacing)
else:
raise NotImplementedError
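# Example invocations (illustrative only; the input file names are hypothetical):
#   pyrvt psa2fa -i "input/*_sa.xls" -o ./output -d 0.05 -m BT12
#   pyrvt fa2psa -i input/event_fa.csv -f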
if __name__ == '__main__':
main()
|
from tenacity import retry, stop_after_delay
@retry(stop=stop_after_delay(10))
def stop_after_delay_test():
print('retry')
raise Exception
stop_after_delay_test()
"""
retry
retry
~~~~~
stops after 10 seconds
"""
|
# -*- coding: utf-8 -*-
from django import forms
from djspace.core.models import BIRTH_YEAR_CHOICES
from djspace.core.models import DISABILITY_CHOICES
from djspace.core.models import REG_TYPE
from djspace.core.models import GenericChoice
from djspace.core.models import UserProfile
from djtools.fields import BINARY_CHOICES
from djtools.fields import GENDER_CHOICES
from djtools.fields import SALUTATION_TITLES
from djtools.fields import STATE_CHOICES
from djtools.fields.localflavor import USPhoneNumberField
RACES = GenericChoice.objects.filter(tags__name__in=['Race']).order_by('name')
class UserForm(forms.Form):
"""Django User data plus salutation and second_name from profile."""
salutation = forms.CharField(
widget=forms.Select(choices=SALUTATION_TITLES),
max_length=16,
required=False,
)
first_name = forms.CharField(max_length=30)
second_name = forms.CharField(
label="Second name, middle name or initial",
max_length=30,
)
last_name = forms.CharField(
max_length=30,
)
class UserProfileForm(forms.ModelForm):
"""User profile data."""
registration_type = forms.CharField(
max_length=32,
widget=forms.Select(choices=REG_TYPE),
)
date_of_birth = forms.DateField(
label="Date of birth",
widget=forms.SelectDateWidget(years=BIRTH_YEAR_CHOICES),
)
gender = forms.TypedChoiceField(
choices=GENDER_CHOICES, widget=forms.RadioSelect(),
)
race = forms.ModelMultipleChoiceField(
label="Race and Ethnicity",
queryset=RACES,
help_text='Check all that apply',
widget=forms.CheckboxSelectMultiple(),
)
tribe = forms.CharField(
max_length=128,
required=False,
)
disability = forms.CharField(
label="Disability status",
widget=forms.Select(choices=DISABILITY_CHOICES),
)
disability_specify = forms.CharField(
label="Specify if not listed",
max_length=255,
required=False,
)
us_citizen = forms.TypedChoiceField(
label="United States Citizen",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
military = forms.TypedChoiceField(
label="Have you served in the United States military?",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
address1 = forms.CharField(label="Street address", max_length=128)
address2 = forms.CharField(label="", max_length=128, required=False)
city = forms.CharField(max_length=128)
state = forms.CharField(widget=forms.Select(choices=STATE_CHOICES))
postal_code = forms.CharField(label="Postal Code", max_length=10)
address1_current = forms.CharField(
label="Street address",
max_length=128,
required=False,
)
address2_current = forms.CharField(label="", max_length=128, required=False)
city_current = forms.CharField(label="City", max_length=128, required=False)
state_current = forms.CharField(
label="State",
widget=forms.Select(choices=STATE_CHOICES),
required=False,
)
postal_code_current = forms.CharField(
label="Postal Code",
max_length=10,
required=False,
)
phone_primary = USPhoneNumberField(
label="Primary phone",
widget=forms.TextInput(attrs={'placeholder': 'eg. 123-456-7890'}),
)
phone_mobile = USPhoneNumberField(
label="Cell phone",
widget=forms.TextInput(attrs={'placeholder': 'eg. 123-456-7890'}),
)
class Meta:
"""Information about the form class."""
model = UserProfile
exclude = ('user', 'salutation', 'second_name')
fields = [
'registration_type',
'date_of_birth',
'gender',
'race',
'tribe',
'disability',
'disability_specify',
'employment',
'military',
'us_citizen',
'address1_current',
'address2_current',
'city_current',
'state_current',
'postal_code_current',
'address1',
'address2',
'city',
'state',
'postal_code',
'phone_primary',
'phone_mobile',
]
def clean(self):
"""Form validation."""
cd = super(UserProfileForm, self).clean()
# current address is required for students
if cd.get('registration_type') in {'Undergraduate', 'Graduate'}:
if not cd.get('address1_current'):
self._errors['address1_current'] = self.error_class(
["Required field"],
)
if not cd.get('city_current'):
self._errors['city_current'] = self.error_class(
["Required field"],
)
if not cd.get('state_current'):
self._errors['state_current'] = self.error_class(
["Required field"],
)
if not cd.get('postal_code_current'):
self._errors['postal_code_current'] = self.error_class(
["Required field"],
)
disability_error = (
cd.get('disability') == 'I have a disability, but it is not listed' and
cd.get('disability_specify') == ''
)
if disability_error:
self._errors['disability_specify'] = self.error_class(
["Please describe your disability"],
)
return cd
|
"""Chapter 1: Question 8.
Check if string 1 is a rotation of string 2.
"""
def is_rotation_slicing(s1, s2):
"""Uses slicing."""
if not s1:
return False
n = len(s1)
if n != len(s2):
return False
for i in range(n):
if s1[i:n] + s1[0:i] == s2:
return True
return False
def is_rotation_substring(s1, s2):
"""Uses substring method."""
if not s1:
return False
n = len(s1)
if n != len(s2):
return False
s = s1 + s1
return s2 in s
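# A quick self-check (not in the original file): both implementations should agree.
if __name__ == '__main__':
    print(is_rotation_slicing('waterbottle', 'erbottlewat'))    # True
    print(is_rotation_substring('waterbottle', 'erbottlewat'))  # True
    print(is_rotation_substring('abc', 'acb'))                  # False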
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
__all__ = ['cabbeling', # TODO
'isopycnal_slope_ratio', # TODO
'isopycnal_vs_ntp_CT_ratio', # TODO
'ntp_pt_vs_CT_ratio', # TODO
'thermobaric'] # TODO
def cabbeling():
pass
def isopycnal_slope_ratio():
pass
def isopycnal_vs_ntp_CT_ratio():
pass
def ntp_pt_vs_CT_ratio():
pass
def thermobaric():
pass
if __name__ == '__main__':
import doctest
doctest.testmod()
|
f = open("flash_data.h", "w")
print("const uint32_t flash_data[] = {", file=f)
for i in range(256):
print(" %d," % i, file=f)
print("};", file=f)
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
"""API公共校验方法
"""
import base64
import datetime
import json
import arrow
from dateutil.parser import parse
from fta import constants
from fta.utils import hooks
from fta.utils import is_ip as _is_ip
from fta.utils.i18n import _
class ValidateError(Exception):
pass
def is_json(data):
try:
data = json.loads(data)
except BaseException:
raise ValidateError(_("Invalid JSON format"))
if not isinstance(data, dict):
raise ValidateError(_("JSON must be dict type"))
return data
def fix_field_by_app(data, fields, app_id):
for f in fields:
data[f] = "%s:%s" % (app_id, data[f])
return data
def is_required(data, fields):
"""检查是否存在,为空, data是合法的dict
"""
for field in fields:
if not data.get(field):
raise ValidateError(_("The field [%(field)s] does not exist or is empty", field=field))
return data
def is_ip(data, fields):
for field in fields:
value = data[field]
if not _is_ip(value):
raise ValidateError(_("The field [%(field)s] is an invalid IP address", field=field))
return data
def is_datetime(data, fields, format=None, replace=False, tzinfo=None):
for field in fields:
value = data[field]
try:
params = [value]
if format:
params.append(format)
clean_value = arrow.get(*params)
            # If the alarm source already carries a timezone, ignore the tzinfo argument
try:
source_tzinfo = parse(value).tzinfo
except Exception:
source_tzinfo = None
if source_tzinfo is None and tzinfo:
clean_value = clean_value.replace(tzinfo=tzinfo)
clean_value = clean_value.to('utc').format(constants.STD_ARROW_FORMAT)
except BaseException:
msg = _("The field [%(field)s] is an invalid time format. Time format must be like: %(time)s",
                    field=field, time=datetime.datetime.now().strftime(format))
raise ValidateError(msg)
else:
if replace:
data[field] = clean_value
return data
def is_format(data, fields, format='', replace=False):
for field in fields:
value = data[field]
try:
if format == 'json':
clean_value = json.loads(value)
elif format == 'base64':
clean_value = base64.b64decode(value)
else:
clean_value = value
except BaseException:
raise ValidateError(
_("The field [%(field)s] is in invalid %(format)s format", field=field, format=format))
else:
if replace:
data[field] = clean_value
return data
hook = hooks.HookImport('manager.www.utils.validate', fail_silently=False)
hook.import_all('is_', env=locals())
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 29 13:37:49 2016
@author: alex
"""
|
# encoding: utf-8
from bs4 import BeautifulSoup
from okscraper.base import BaseScraper
from okscraper.sources import UrlSource, ScraperSource
from okscraper.storages import ListStorage, DictStorage
from lobbyists.models import LobbyistHistory, Lobbyist, LobbyistData, LobbyistRepresent, LobbyistRepresentData
from persons.models import Person
from django.core.exceptions import ObjectDoesNotExist
from datetime import datetime
class LobbyistsIndexScraper(BaseScraper):
"""
This scraper gets the list of lobbyist ids from the knesset lobbyists page html
returns a list of lobbyist ids - doesn't store anything in db
"""
def __init__(self):
super(LobbyistsIndexScraper, self).__init__(self)
self.source = UrlSource('http://www.knesset.gov.il/lobbyist/heb/lobbyist.aspx')
self.storage = ListStorage()
def _storeLobbyistIdsFromSoup(self, soup):
elts = soup.findAll(lobbyist_id=True)
counter = 0
for elt in elts:
lobbyist_id = elt.get('lobbyist_id')
if lobbyist_id.isdigit():
self.storage.store(lobbyist_id)
self._getLogger().debug(lobbyist_id)
counter = counter + 1
self._getLogger().info('got %s lobbyists', str(counter))
def _scrape(self):
html = self.source.fetch()
soup = BeautifulSoup(html)
return self._storeLobbyistIdsFromSoup(soup)
|
from ownership import server
from ownership.models import Owners
import unittest
import json
import mock
import uuid
#Set up test models
test_owner1 = Owners()
test_owner1.lrid = 'a8098c1a-f86e-11da-bd1a-00112444be1e'
test_owner1.owner_index = 1
test_owner2 = Owners()
test_owner2.lrid = 'd7cd9904-2f84-11e4-b2e1-0800277f1059'
test_owner2.owner_index = 2
test_owners = [test_owner1, test_owner2]
class OwnershipTestCase(unittest.TestCase):
def setUp(self):
server.app.config['TESTING'] = True
self.app = server.app.test_client()
def test_server(self):
response = self.app.get('/')
assert response.status == '200 OK'
@mock.patch('ownership.server._find_owners', return_value=test_owners)
def test_get_owners(self, mock_find):
data = json.dumps({"title_number":"DN100"})
response = self.app.post('/owners', data=data, content_type='application/json')
assert response.data == '{"owners": [{"index": 1, "lrid": "a8098c1a-f86e-11da-bd1a-00112444be1e"}, {"index": 2, "lrid": "d7cd9904-2f84-11e4-b2e1-0800277f1059"}]}'
assert response.status == '200 OK'
@mock.patch('ownership.server._find_owners', return_value=[])
def test_no_owner_found(self, mock_find):
data = json.dumps({"title_number":"DN200"})
response = self.app.post('/owners', data=data, content_type='application/json')
assert response.data == '{"owners": []}'
assert response.status == '200 OK'
def test_bad_request(self):
data = json.dumps({"title_incorrect_spelling":"DN100"})
response = self.app.post('/owners', data=data, content_type='application/json')
assert response.data == 'title_number field not found'
assert response.status == '400 BAD REQUEST'
def test_incorrect_content_type(self):
data = json.dumps({"title_number":"DN100"})
response = self.app.post('/owners', data=data)
assert response.status == '415 UNSUPPORTED MEDIA TYPE'
def test_for_invalid_json(self):
response = self.app.post('/owners', data='{"title_number":DN', content_type='application/json')
assert response.status == '400 BAD REQUEST'
def test_health(self):
response = self.app.get('/health')
self.assertEqual(response.status, '200 OK')
|
import torch
import torch.nn as nn
class ARModelBase(nn.Module):
def __init__(self, encoder, decoder, discriminator, lambda_auto=1.0, lambda_adv=1.0, lambda_cross=1.0):
super().__init__()
self.bos_ix = encoder.src_embedder.get_bos_ix()
self.eos_ix = encoder.src_embedder.get_eos_ix()
self.pad_ix = encoder.src_embedder.get_pad_ix()
self.encoder = encoder
self.decoder = decoder
self.discriminator = discriminator
self.lambda_auto = lambda_auto
self.lambda_adv = lambda_adv
self.lambda_cross = lambda_cross
self.xentropy = nn.CrossEntropyLoss(ignore_index=self.pad_ix, reduction='mean')
def gen_params(self):
return set(self.encoder.parameters()) | set(self.decoder.parameters())
def dis_params(self):
return set(self.discriminator.parameters())
def get_token_num(self, sentences):
return torch.sum(sentences != self.pad_ix)
def cross_entropy_loss(self, logits, targets):
logits_ = logits.reshape(-1, logits.size(-1))
targets_ = targets.reshape(-1)
loss = self.xentropy(logits_, targets_)
return loss
def auto_reconst(self, x, y, teacher, domain, noise=False):
encoded = self.encoder(x, domain=domain, noise=noise)
decoded = self.decoder(x, encoded['outs'], teacher=teacher, domain=domain, teacher_forcing_ratio=0.5)
loss = self.cross_entropy_loss(decoded['logits'], y)
return encoded['outs'], loss
def cross_reconst(self, x, y, teacher, domain, noise=False):
src_domain = 'src' if domain == 'src' else 'tgt'
tgt_domain = 'tgt' if domain == 'src' else 'src'
encoded = self.encoder(x, domain=src_domain, noise=noise)
decoded = self.decoder(x, encoded['outs'], teacher=teacher, domain=tgt_domain, teacher_forcing_ratio=0.5)
loss = self.cross_entropy_loss(decoded['logits'], y)
return encoded['outs'], loss
def adversarial(self, z, domain=None):
batch_size = z.size(0)
if domain == 'tgt':
targets = torch.zeros(batch_size, 1).float().to(z.device)
elif domain == 'src':
targets = torch.ones(batch_size, 1).float().to(z.device)
else:
targets = None
result = self.discriminator(z, targets)
return result['loss']
def forward(self, src_sentences, tgt_sentences, noise=False, wrap_scalars=False):
raise NotImplementedError('The AutoReconstructionModelBase class should never execute forward')
class ARModel(ARModelBase):
def __init__(self, encoder, decoder, discriminator, lambda_auto=1.0, lambda_adv=1.0, lambda_cross=1.0):
super().__init__(encoder, decoder, discriminator, lambda_auto, lambda_adv, lambda_cross)
def forward(self, src_sentences, tgt_sentences, noise=False, wrap_scalars=False):
device = src_sentences.device if src_sentences is not None else tgt_sentences.device
auto_loss = torch.tensor(0., device=device).float()
adv_loss = torch.tensor(0., device=device).float()
cross_loss = torch.tensor(0., device=device).float()
num_toks = torch.tensor(0., device=device).float()
if src_sentences is not None:
x_src, y_src = src_sentences[:, :-1], src_sentences[:, 1:]
# Reconstructing the original source sentences
            _, auto_loss_src = self.auto_reconst(x_src, y_src, teacher=x_src, domain='src', noise=noise)
# Weighting the losses
auto_loss += self.lambda_auto * auto_loss_src
num_toks += self.get_token_num(src_sentences)
if tgt_sentences is not None:
x_tgt, y_tgt = tgt_sentences[:, :-1], tgt_sentences[:, 1:]
# Reconstructing the original target sentences
            _, auto_loss_tgt = self.auto_reconst(x_tgt, y_tgt, teacher=x_tgt, domain='tgt', noise=noise)
# Weighting the losses
auto_loss += self.lambda_auto * auto_loss_tgt
num_toks += self.get_token_num(tgt_sentences)
if wrap_scalars:
auto_loss = auto_loss.unsqueeze(0)
adv_loss = adv_loss.unsqueeze(0)
cross_loss = cross_loss.unsqueeze(0)
num_toks = num_toks.unsqueeze(0)
result = {
'loss': auto_loss + adv_loss + cross_loss,
'auto_loss': auto_loss,
'adv_loss': adv_loss,
'cross_loss': cross_loss,
'num_toks': num_toks
}
return result
class AdversarialARModel(ARModelBase):
def __init__(self, encoder, decoder, discriminator, lambda_auto=1.0, lambda_adv=1.0, lambda_cross=1.0):
super().__init__(encoder, decoder, discriminator, lambda_auto, lambda_adv, lambda_cross)
def forward(self, src_sentences, tgt_sentences, noise=False, wrap_scalars=False):
device = src_sentences.device if src_sentences is not None else tgt_sentences.device
auto_loss = torch.tensor(0., device=device).float()
adv_loss = torch.tensor(0., device=device).float()
cross_loss = torch.tensor(0., device=device).float()
num_toks = torch.tensor(0., device=device).float()
if src_sentences is not None:
x_src, y_src = src_sentences[:, :-1], src_sentences[:, 1:]
# Reconstructing the original source sentences
            z_src, auto_loss_src = self.auto_reconst(x_src, y_src, teacher=x_src, domain='src', noise=noise)
# Adversarial loss for auto reconstruction on source sentences
adv_loss_src = self.adversarial(z_src, domain='src')
# Weighting the losses
auto_loss += self.lambda_auto * auto_loss_src
adv_loss += self.lambda_adv * adv_loss_src
num_toks += self.get_token_num(src_sentences)
if tgt_sentences is not None:
x_tgt, y_tgt = tgt_sentences[:, :-1], tgt_sentences[:, 1:]
# Reconstructing the original target sentences
            z_tgt, auto_loss_tgt = self.auto_reconst(x_tgt, y_tgt, teacher=x_tgt, domain='tgt', noise=noise)
# Adversarial loss for auto reconstruction on target sentences
adv_loss_tgt = self.adversarial(z_tgt, domain='tgt')
# Weighting the losses
auto_loss += self.lambda_auto * auto_loss_tgt
adv_loss += self.lambda_adv * adv_loss_tgt
num_toks += self.get_token_num(tgt_sentences)
if wrap_scalars:
auto_loss = auto_loss.unsqueeze(0)
adv_loss = adv_loss.unsqueeze(0)
cross_loss = cross_loss.unsqueeze(0)
num_toks = num_toks.unsqueeze(0)
result = {
'loss': auto_loss + adv_loss + cross_loss,
'auto_loss': auto_loss,
'adv_loss': adv_loss,
'cross_loss': cross_loss,
'num_toks': num_toks
}
return result
class CrossDomainARModel(ARModelBase):
def __init__(self, encoder, decoder, discriminator, lambda_auto=1.0, lambda_adv=1.0, lambda_cross=1.0):
super().__init__(encoder, decoder, discriminator, lambda_auto, lambda_adv, lambda_cross)
def forward(self, src_sentences, tgt_sentences, noise=False, wrap_scalars=False):
device = src_sentences.device
auto_loss = torch.tensor(0., device=device).float()
adv_loss = torch.tensor(0., device=device).float()
cross_loss = torch.tensor(0., device=device).float()
num_toks = torch.tensor(0., device=device).float()
x_src, y_src = src_sentences[:, :-1], src_sentences[:, 1:]
x_tgt, y_tgt = tgt_sentences[:, :-1], tgt_sentences[:, 1:]
# Reconstructing the original sentences
z_src, auto_loss_src = self.auto_reconst(x_src, y_src, teacher=x_src, domain='src', noise=noise)
z_tgt, auto_loss_tgt = self.auto_reconst(x_tgt, y_tgt, teacher=x_tgt, domain='tgt', noise=noise)
# Adversarial loss for auto reconstruction
adv_loss_src = self.adversarial(z_src, domain='src')
adv_loss_tgt = self.adversarial(z_tgt, domain='tgt')
# Weighting the losses
auto_loss += self.lambda_auto * (auto_loss_src + auto_loss_tgt)
adv_loss += self.lambda_adv * (adv_loss_src + adv_loss_tgt)
num_toks += self.get_token_num(src_sentences) + self.get_token_num(tgt_sentences)
if self.lambda_cross != 0:
# Getting the mappings between domains to do cross-domain reconstruction
fake_x_tgt = self.decoder.generate(z_src, x_src.size(1), 1.0, 'greedy', 'tgt', device, strip=False)
fake_x_src = self.decoder.generate(z_tgt, x_tgt.size(1), 1.0, 'greedy', 'src', device, strip=False)
# Reconstructing the mapped fake_x to the original domain
cross_z_tgt, cross_loss_src = self.cross_reconst(fake_x_tgt, y_src, teacher=x_src, domain='tgt')
cross_z_src, cross_loss_tgt = self.cross_reconst(fake_x_src, y_tgt, teacher=x_tgt, domain='src')
# Adversarial loss for cross-domain reconstruction
adv_cross_loss_src = self.adversarial(cross_z_tgt, domain='tgt')
adv_cross_loss_tgt = self.adversarial(cross_z_src, domain='src')
# Weighting the losses
adv_loss += self.lambda_adv * (adv_cross_loss_src + adv_cross_loss_tgt)
cross_loss += self.lambda_cross * (cross_loss_src + cross_loss_tgt)
if wrap_scalars:
auto_loss = auto_loss.unsqueeze(0)
adv_loss = adv_loss.unsqueeze(0)
cross_loss = cross_loss.unsqueeze(0)
num_toks = num_toks.unsqueeze(0)
result = {
'loss': auto_loss + adv_loss + cross_loss,
'auto_loss': auto_loss,
'adv_loss': adv_loss,
'cross_loss': cross_loss,
'num_toks': num_toks
}
return result
|
import numpy as np
from numpy import array
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
sns.set(rc={'figure.figsize': (8, 6.)})
sns.set_style("whitegrid")
def target(training, test, valid):
plt.figure()
plt.title('Target function performance training vs validation data')
plt.plot(training, label='training');
plt.plot(test, label='test');
plt.plot(valid, label='validation');
plt.legend()
def misclassification(training, test, valid):
plt.figure()
plt.title('Classification accuracy')
plt.plot(training, label='training');
plt.plot(test, label='test');
plt.plot(valid, label='validation');
plt.legend()
def hinton(matrix, max_weight=None, ax=None):
"""Draw Hinton diagram for visualizing a weight matrix."""
ax = ax if ax is not None else plt.gca()
if not max_weight:
max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(matrix):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w) / max_weight)
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
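# Illustrative usage sketch (not part of the original module):
#   hinton(np.random.randn(10, 10))
#   plt.show()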
def update_line(hl, new_data):
    hl.set_xdata(np.append(hl.get_xdata(), new_data))
    hl.set_ydata(np.append(hl.get_ydata(), new_data))
plt.draw() |
print('DIVISÃO EM PYTHON\n')
try:
a = int(input('Numerador: '))
b = int(input('Denominador: '))
r = a / b
except (ValueError, TypeError) as erro1:
print(f'Ocorreu um erro com os valores que introduziu.')
except ZeroDivisionError as erro2:
print('Não é possível dividir por 0.')
except KeyboardInterrupt as erro3:
    print('Não inseriu os dados!')
except Exception as erro: # Catch-all exception
print(f'O erro foi causado por {erro.__cause__}')
else:
print(f'O resultado da divisão é {r:.2}')
finally:
    print('\nEspero que o programa tenha sido útil!') |
import turtle as t
import random
#---------------------------------------#
# SCREEN SETUP #
#---------------------------------------#
width = 10
height = 5
t.setup(width*100+20,height*100+20)
screen = t.Screen()
screen.title("Captain Planet")
screen.screensize(width*100,height*100)
right = 100*(width/2-1)
top = 100*height/2
screen.bgpic("bgpic.gif")
#GLOBAL TURN
turn = 0
#---------------------------------------#
# POLLUTION'S ENDLESS MARCH #
#---------------------------------------#
t.register_shape("square", ((0,0),(0,50), (50,50), (50,0)))
waste = t.Turtle()
waste.ht()
waste.up()
waste.speed(0)
waste.shape("square")
waste.goto(right, 0)
def waste_left():
global waste
waste.goto(waste.pos()[0]-55, waste.pos()[1])
def pollute():
global waste
waste.stamp()
if random.random() > 0.3:
waste.goto(waste.pos()[0], waste.pos()[1]+random.triangular(-55,55))
else:
waste_left()
#---------------------------------------#
# CLICK LISTENERS #
#---------------------------------------#
elements = [ "earth","fire","wind","water","heart"]
colors = ["brown", "orange", "grey", "blue", "red"]
heroes = []
for i in range(len(elements)):
t.register_shape(elements[i]+".gif")
t1 = t.Turtle()
t1.shape(elements[i]+".gif")
t1.up()
t1.goto(50+122*(i-1), -100)
heroes.append(t1)
def click(x,y):
global turn
reset_state()
screen.onclick(None) #Stop Listening
heroes[turn%(len(heroes))].goto(x,y) #Move
pollute()
turn += 1
screen.onclick(click) #Listen again
screen.onclick(click)
#---------------------------------------#
# KEYBOARD LISTENERS: (num keys 1-5) #
#---------------------------------------#
connector = t.Turtle()
connector.pensize(10)
connector.up()
state = []
def reset_state():
global state, connector
connector.clear() #Clear all lines.
connector.up() #Connector ready for next sequence.
state = [] #State initialized
def captain_planet():
global state, connector
print ("Captain Planet!")
print (state)
reset_state()
def command(key):
global state, connector, colors
def f():
global state, connector, colors
if len(state) > 0 and key in state[1:]:
return
#Begin Move to hero
for i in range(len(heroes)):
screen.onkey(None, str(i+1)) #Stop listening
connector.goto(heroes[key].pos()) #Move
for i in range(len(heroes)):
screen.onkey(command(i), str(i+1)) #Listen again.
#End Move
connector.down()
connector.pencolor(colors[key])
if len(state)>0 and state[0] == key:
captain_planet()
else:
state.append(key)
return f
for i in range(len(heroes)):
screen.onkey(command(i), str(i+1))
screen.listen()
#---------------------------------------#
# MAIN LOOP #
#---------------------------------------#
t.mainloop() |
'''
Given a non-empty binary search tree and a target value, find the value in the BST that is closest to the target.
Note:
Given target value is a floating point.
You are guaranteed to have only one unique value in the BST that is closest to the target.
Example:
Input: root = [4,2,5,1,3], target = 3.714286
4
/ \
2 5
/ \
1 3
Output: 4
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def closestValue(self, root, target):
"""
:type root: TreeNode
:type target: float
:rtype: int
"""
res = [root.val]
self.find(root, target, res)
return res[0]
def find(self, node, target, res):
if abs(res[0] - target) > abs(node.val - target):
res[0] = node.val
if node.left:
self.find(node.left, target, res)
if node.right:
self.find(node.right, target, res)
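# A hedged demo (not part of the original solution): build the tree from the
# docstring example and query it. TreeNode is defined locally here only for
# illustration, mirroring the commented-out definition above.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(4)
    root.left = TreeNode(2)
    root.right = TreeNode(5)
    root.left.left = TreeNode(1)
    root.left.right = TreeNode(3)
    print(Solution().closestValue(root, 3.714286))  # 4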
|
from header import *
from .utils import *
from .util_func import *
class BERTDualInferenceContextDataset(Dataset):
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.split(path)[0]}/inference_ctx_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
data = read_text_data_utterances(path, lang=self.args['lang'])
self.data = []
for label, utterances in tqdm(data):
if label == 0:
continue
item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
ids = []
for u in item[:-1]:
ids.extend(u + [self.eos])
ids.pop()
ids = ids[-self.args['max_len']+2:]
ids = [self.cls] + ids + [self.sep]
self.data.append({
'ids': ids,
'context': utterances[:-1],
'response': utterances[-1],
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = torch.LongTensor(bundle['ids'])
context = bundle['context']
response = bundle['response']
return ids, context, response
def save(self):
        torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
ids = [i[0] for i in batch]
context = [i[1] for i in batch]
response = [i[2] for i in batch]
ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
ids_mask = generate_mask(ids)
ids, ids_mask = to_cuda(ids, ids_mask)
return {
'ids': ids,
'mask': ids_mask,
'context': context,
'response': response
}
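# Hypothetical usage sketch (not in the original): `AutoTokenizer`, `DataLoader`
# and the argument values below are assumptions based on this constructor's signature.
#   vocab = AutoTokenizer.from_pretrained(args['tokenizer'])
#   dataset = BERTDualInferenceContextDataset(vocab, path, **args)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.collate)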
class BERTDualInferenceFullContextDataset(Dataset):
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.split(path)[0]}/inference_full_ctx_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
data = read_text_data_utterances_full(path, lang=self.args['lang'], turn_length=args['full_turn_length'])
self.data = []
for label, utterances in tqdm(data):
if label == 0:
continue
item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
ids = []
for u in item[:-1]:
ids.extend(u + [self.eos])
ids.pop()
ids = ids[-self.args['max_len']+2:]
ids = [self.cls] + ids + [self.sep]
self.data.append({
'ids': ids,
'context': utterances[:-1],
'response': utterances[-1],
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = torch.LongTensor(bundle['ids'])
context = bundle['context']
response = bundle['response']
return ids, context, response
def save(self):
        torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
ids = [i[0] for i in batch]
context = [i[1] for i in batch]
response = [i[2] for i in batch]
ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
ids_mask = generate_mask(ids)
ids, ids_mask = to_cuda(ids, ids_mask)
return {
'ids': ids,
'mask': ids_mask,
'context': context,
'response': response
}
class BERTDualInferenceFullContextSingleExtendDataset(Dataset):
'''each in-dataset utterance will be treated as the extended context for training'''
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.split(path)[0]}/inference_full_ctx_ext_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
data = read_response_data_full(path, lang=self.args['lang'], turn_length=5)
self.data = []
for utterance in tqdm(data):
ids = self.vocab.encode(utterance, add_special_tokens=False)
ids = ids[-self.args['max_len']+2:]
ids = [self.cls] + ids + [self.sep]
self.data.append({
'ids': ids,
'context': utterance,
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = torch.LongTensor(bundle['ids'])
context = bundle['context']
return ids, context
def save(self):
        torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
ids = [i[0] for i in batch]
context = [i[1] for i in batch]
ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
ids_mask = generate_mask(ids)
ids, ids_mask = to_cuda(ids, ids_mask)
return {
'ids': ids,
'mask': ids_mask,
'context': context,
'response': context,
}
|