import cv2
import numpy as np

face_csc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

img = cv2.imread('richfacepoorface.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = face_csc.detectMultiScale(gray, 1.1, 4)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)

resized_image = cv2.resize(img, (int(img.shape[1]), int(img.shape[0])))
cv2.imshow('stock_people', resized_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

# print(type(faces))
# print(faces)
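# The script above loads eye_cascade but never uses it. Below is a minimal sketch of
# how the eye classifier could be applied inside each detected face region (assuming
# the same image and cascade files as above; this is an illustrative extension, not
# part of the original script):
import cv2

img = cv2.imread('richfacepoorface.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face_csc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

for (x, y, w, h) in face_csc.detectMultiScale(gray, 1.1, 4):
    # Restrict the eye search to the face region to cut down false positives.
    roi_gray = gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]
    for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi_gray):
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (255, 0, 0), 2)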
import operator
from itertools import dropwhile
from functools import reduce

from prettytoml.elements import traversal as t, traversal
from prettytoml.elements.metadata import WhitespaceElement
from prettytoml.elements.table import TableElement
from prettytoml.prettifier import common


def deindent_anonymous_table(toml_file_elements):
    """
    Rule: Anonymous table should never be indented.
    """
    anonymous_table_index = _find_anonymous_table(toml_file_elements)
    if anonymous_table_index is None:
        return toml_file_elements
    return toml_file_elements[:anonymous_table_index] + \
        [_unindent_table(toml_file_elements[anonymous_table_index])] + \
        toml_file_elements[anonymous_table_index+1:]


def _unindent_table(table_element):
    table_lines = tuple(common.lines(table_element.sub_elements))
    unindented_lines = tuple(
        tuple(dropwhile(lambda e: isinstance(e, WhitespaceElement), line))
        for line in table_lines
    )
    return TableElement(reduce(operator.concat, unindented_lines))


def _find_anonymous_table(toml_file_elements):
    """
    Finds and returns the index of the TableElement comprising the anonymous table, or None.
    """
    first_table_index = common.index(t.predicates.table, toml_file_elements)
    first_table_header_index = common.index(t.predicates.table_header, toml_file_elements)
    if first_table_header_index is None:
        return first_table_index
    elif first_table_index < first_table_header_index:
        return first_table_index
def count_pair(arr, target_sum):
    hm = dict()
    for elt in arr:
        hm[elt] = hm.get(elt, 0) + 1

    count = 0
    for elt in arr:
        remaining = target_sum - elt
        count += hm.get(remaining, 0)
        if remaining == elt:
            # Do not count an element as a pair with itself.
            count -= 1
    return count // 2


if __name__ == "__main__":
    arr = [7, 5, 1, -1, 5]
    target_sum = 6
    pair_count = count_pair(arr, target_sum)
    print("Number of pair(s) with sum equal to {target_sum} is {pair_count}".format(
        target_sum=target_sum, pair_count=pair_count))
#!/usr/bin/env python3
"""
Test for rundaterange limit
"""

import unittest

from base_test import PschedTestBase

from pscheduler.limitprocessor.limit.rundaterange import *

LIMIT = {
    "start": "2019-01-01T00:00:00-04",
    "end": "2019-12-31T23:59:59-04",
    "overlap": True
}

LIMIT_NO_OVERLAP = {
    "start": "2019-01-01T00:00:00-04",
    "end": "2019-12-31T23:59:59-04",
    "overlap": False
}


class TestLimitprocessorLimitRunDateRange(PschedTestBase):
    """
    Test the Limit
    """

    def test_data_is_valid(self):
        """Limit Processor / Limit Run Date Range / Data Validation"""
        self.assertEqual(rundaterange_data_is_valid(LIMIT), (True, "OK"))
        self.assertEqual(
            rundaterange_data_is_valid({"bad": "value"}),
            (False, "At /: Additional properties are not allowed ('bad' was unexpected)")
        )

    def test_limit(self):
        """Limit Processor / Limit Run Date Range / Limit"""

        limit = LimitRunDateRange(LIMIT)

        # In range
        self.assertEqual(
            limit.evaluate({
                "task": {
                    "run_schedule": {
                        "start": "2019-05-04T12:34:56-04",
                        "duration": "PT30S"
                    }
                }
            }),
            {"passed": True}
        )

        # Overlapping
        self.assertEqual(
            limit.evaluate({
                "task": {
                    "run_schedule": {
                        "start": "2018-12-31T23:59:45-04",
                        "duration": "PT30S"
                    }
                }
            }),
            {"passed": True}
        )

        # Out of range
        self.assertEqual(
            limit.evaluate({
                "task": {
                    "run_schedule": {
                        "start": "2012-05-04T12:34:56-04",
                        "duration": "PT30S"
                    }
                }
            }),
            {"passed": False, "reasons": ["Ranges do not match"]}
        )

        limit = LimitRunDateRange(LIMIT_NO_OVERLAP)

        # Overlapping
        self.assertEqual(
            limit.evaluate({
                "task": {
                    "run_schedule": {
                        "start": "2018-12-31T23:59:45-04",
                        "duration": "PT30S"
                    }
                }
            }),
            {"passed": False, "reasons": ["Ranges do not match"]}
        )


if __name__ == '__main__':
    unittest.main()
""" Helper class to interact with the Matomo API """ import re import requests from requests.adapters import HTTPAdapter from urllib3.util.retry import Retry class MatomoApiManager: """ This class helps to interact with Matomo API There are several functions to retrieve unique visitors for last 30 days, a month, and a year. You are also able to add new regions to your matomo instance and furthermore. """ protocol = "https://" # Protocol "http://" or "https://" """ Variable which allows you also to accept broken ssl-certificates with requests library (False == ignore ssl-issues, True == dont allow broken ssl-certificates """ ssl_verify = True matomo_url = "" # URL to Matomo-Instance matomo_api_key = "" # Matomo API-key def __init__(self, matomo_url, matomo_api_key, ssl_verify): """ Constructor initialises matomo_url, matomo_api_key, ssl_verify :param matomo_url: :param matomo_api_key: :param ssl_verify: """ self.matomo_url = matomo_url self.matomo_api_key = matomo_api_key self.matomo_api_key = ( "&token_auth=" + self.matomo_api_key ) # concats token api-parameter self.ssl_verify = ssl_verify self.cleanmatomo_url() # cleans matomo url for proper requests def cleanmatomo_url(self): """ Cleans Matomo-URL for proper requests. Checks ending slash and beginning http(s):// """ self.matomo_url = re.sub(r"/\/$/", "", self.matomo_url) # Cuts "/" if re.match(r"^http://", self.matomo_url): # replace it to "https://" self.matomo_url = re.sub("^http://", "", self.matomo_url) self.matomo_url = self.protocol + self.matomo_url elif not bool( re.match("^https://", self.matomo_url) ): # check for "https://" and set it self.matomo_url = self.protocol + self.matomo_url def checkmatomo_url(self): """ This method checks the proper functionality of a simple url request :return: True or False """ try: http_code = requests.get( self.matomo_url, verify=self.ssl_verify ).status_code if http_code == 200: return True return False except ConnectionError: return False def get_visitors_per_timerange(self, date_string, region_id, period, lang): """ Returns the total unique visitors in a timerange as definded in period :param region_id: String :param date_string: String "yyyy-mm-dd,yyyy-mm-dd" :param period: String "day", "week", "month", "year" :param lang: String contains the language, that is called :return: List[Date, Hits] """ domain = self.matomo_url api_key = self.matomo_api_key url = f"""{domain}/index.php?date={date_string}&expanded=1 &filter_limit=-1&format=JSON&format_metrics=1 &idSite={region_id}&method=API.get&module=API&period={period} &segment=pageUrl%253D@%25252F{lang} %25252Fwp-json%25252F{api_key}""" session = requests.Session() retry = Retry(connect=3, backoff_factor=0.5) adapter = HTTPAdapter(max_retries=retry) session.mount("http://", adapter) session.mount("https://", adapter) response = session.get(url).json() result = [] for json_object in response: if period == "day": if response[json_object] == []: result.append( [ re.sub( r"(\d{4})-(\d{1,2})-(\d{1,2})", "\\3-\\2-\\1", json_object, ), 0, ] ) else: result.append( [ re.sub( r"(\d{4})-(\d{1,2})-(\d{1,2})", "\\3-\\2-\\1", json_object, ), response[json_object]["nb_uniq_visitors"], ] ) elif period == "month": if response[json_object] == []: result.append( [re.sub(r"(\d{4})-(\d{1,2})", "\\2-\\1", json_object), 0] ) else: result.append( [ re.sub(r"(\d{4})-(\d{1,2})", "\\2-\\1", json_object), response[json_object]["nb_uniq_visitors"], ] ) return result
from cryptography.fernet import Fernet

if __name__ == '__main__':
    print(Fernet.generate_key())
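# A brief sketch of what such a generated key is typically used for with the
# cryptography library: symmetric encryption and decryption via Fernet. This is an
# illustrative follow-up, not part of the original snippet:
from cryptography.fernet import Fernet

key = Fernet.generate_key()
f = Fernet(key)
token = f.encrypt(b"secret message")  # bytes in, URL-safe token out
plain = f.decrypt(token)              # raises InvalidToken if the key is wrong
assert plain == b"secret message"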
from itscsapp.utils.models import ITSModel
from django.db import models


class Contact(ITSModel):
    name = models.CharField(max_length=255, verbose_name='Nombre')
    phone_number = models.CharField(max_length=255, verbose_name='Numero De Telefono')
    subject = models.CharField(max_length=255, verbose_name='Asunto')
    email = models.EmailField(verbose_name='Email')
    message = models.TextField(verbose_name='Mensaje')
    created = models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creacion')
    updated = models.DateTimeField(auto_now=True, verbose_name='Fecha de actualizacion')

    class Meta:
        verbose_name = 'Contacto'
        verbose_name_plural = 'contactos'
        ordering = ['-created']

    def __str__(self):
        return self.name
"""Facebook ResNet-200 Torch Model Model with weights ported from https://github.com/facebook/fb.resnet.torch (BSD-3-Clause) using https://github.com/clcarwin/convert_torch_to_pytorch (MIT) """ import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F import torch.utils.model_zoo as model_zoo from torch.autograd import Variable from functools import reduce from collections import OrderedDict from .adaptive_avgmax_pool import * model_urls = { 'fbresnet200': 'https://www.dropbox.com/s/tchq8fbdd4wabjx/fbresnet_200-37304a01b.pth?dl=1', } class LambdaBase(nn.Sequential): def __init__(self, fn, *args): super(LambdaBase, self).__init__(*args) self.lambda_func = fn def forward_prepare(self, input): output = [] for module in self._modules.values(): output.append(module(input)) return output if output else input class Lambda(LambdaBase): def forward(self, input): return self.lambda_func(self.forward_prepare(input)) class LambdaMap(LambdaBase): def forward(self, input): return list(map(self.lambda_func, self.forward_prepare(input))) class LambdaReduce(LambdaBase): def forward(self, input): return reduce(self.lambda_func, self.forward_prepare(input)) def fbresnet200_features(activation_fn=nn.ReLU()): return nn.Sequential( # Sequential, nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3)), nn.BatchNorm2d(64), activation_fn, nn.MaxPool2d((3, 3), (2, 2), (1, 1)), nn.Sequential( # Sequential, nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(64), activation_fn, nn.Conv2d(64, 64, (1, 1)), nn.BatchNorm2d(64), activation_fn, nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(64), activation_fn, nn.Conv2d(64, 256, (1, 1)), ), nn.Sequential( # Sequential, nn.Conv2d(64, 256, (1, 1)), nn.BatchNorm2d(256), ), ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 64, (1, 1)), nn.BatchNorm2d(64), activation_fn, nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(64), activation_fn, nn.Conv2d(64, 256, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 64, (1, 1)), nn.BatchNorm2d(64), activation_fn, nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(64), activation_fn, nn.Conv2d(64, 256, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), ), nn.Sequential( # Sequential, nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (2, 2), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), nn.Sequential( # Sequential, nn.Conv2d(256, 512, (1, 1), (2, 2)), nn.BatchNorm2d(512), ), ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, 
LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # 
Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), 
nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 128, (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(128), activation_fn, nn.Conv2d(128, 512, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), ), nn.Sequential( # Sequential, nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (2, 2), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), nn.Sequential( # Sequential, nn.Conv2d(512, 1024, (1, 1), (2, 2)), nn.BatchNorm2d(1024), ), ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, 
nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), 
nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, 
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), 
nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 256, (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(256), activation_fn, nn.Conv2d(256, 1024, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), ), nn.Sequential( # Sequential, nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(1024), activation_fn, nn.Conv2d(1024, 512, (1, 1)), nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1)), nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 2048, (1, 1)), ), nn.Sequential( # Sequential, nn.Conv2d(1024, 2048, (1, 1), (2, 2)), nn.BatchNorm2d(2048), ), ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(2048), activation_fn, nn.Conv2d(2048, 512, (1, 1)), nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 2048, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), nn.Sequential( # Sequential, LambdaMap(lambda x: x, # ConcatTable, nn.Sequential( # Sequential, nn.BatchNorm2d(2048), activation_fn, nn.Conv2d(2048, 512, (1, 1)), nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1)), nn.BatchNorm2d(512), activation_fn, nn.Conv2d(512, 2048, (1, 1)), ), Lambda(lambda x: x), # Identity, ), LambdaReduce(lambda x, y: x + y), # CAddTable, ), ), Lambda(lambda x: x), # Copy, nn.BatchNorm2d(2048), activation_fn, ) class ResNet200(nn.Module): def __init__(self, num_classes=1000, activation_fn=nn.ReLU(), drop_rate=0., global_pool='avg'): super(ResNet200, self).__init__() self.drop_rate = drop_rate self.global_pool = global_pool self.num_classes = num_classes self.features = fbresnet200_features(activation_fn=activation_fn) self.fc = nn.Linear(2048 * 
pooling_factor(global_pool), num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal(m.weight) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.global_pool = global_pool self.num_classes = num_classes self.fc = nn.Linear(2048 * pooling_factor(global_pool), num_classes) def forward_features(self, x, pool=True): x = self.features(input) if pool: x = adaptive_avgmax_pool2d(x, self.global_pool) x = x.view(x.size(0), -1) return x def forward(self, x): x = self.forward_features(x) if self.drop_rate > 0: x = F.dropout(x, p=self.drop_rate, training=self.training) x = self.fc(x) return x def fbresnet200(pretrained=False, num_classes=1000, **kwargs): model = ResNet200(num_classes=num_classes, **kwargs) if pretrained: # Remap pretrained weights to match our class module with features + fc pretrained_weights = model_zoo.load_url(model_urls['fbresnet200']) feature_keys = filter(lambda k: '13.1.' not in k, pretrained_weights.keys()) remapped_weights = OrderedDict() for k in feature_keys: remapped_weights['features.' + k] = pretrained_weights[k] remapped_weights['fc.weight'] = pretrained_weights['13.1.weight'] remapped_weights['fc.bias'] = pretrained_weights['13.1.bias'] model.load_state_dict(remapped_weights) return model
# Copyright (c) 2013 Vindeka, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTE: gate_conn # You'll see gate_conn passed around a few places in this file. This is the # source httplib connection of whatever it is attached to. # It is used when early termination of reading from the connection should # happen, such as when a range request is satisfied but there's still more the # source connection would like to send. To prevent having to read all the data # that could be left, the source connection can be .close() and then reads # commence to empty out any buffers. # These shenanigans are to ensure all related objects can be garbage # collected. We've seen objects hang around forever otherwise. import os import uuid import mimetypes from oslo.config import cfg from random import shuffle from time import time from eventlet import Timeout from gate.common.utils import get_logger, split_path, \ config_true_value, check_utf8 from gate.controllers import CaseController #, EvidenceController, \ # ObjectController from gate.common.swob import HTTPBadRequest, HTTPForbidden, \ HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \ HTTPServerError, Request class Application(object): """WSGI application for the api server.""" def __init__(self, conf, memcache=None, logger=None, account_ring=None, container_ring=None, object_ring=None): if conf is None: conf = {} if logger is None: self.logger = get_logger(conf, log_route='api-server') else: self.logger = logger gate_dir = conf.get('gate_dir', '/etc/gate') self.client_timeout = int(conf.get('client_timeout', 60)) self.put_queue_depth = int(conf.get('put_queue_depth', 10)) self.object_chunk_size = int(conf.get('object_chunk_size', 65536)) self.client_chunk_size = int(conf.get('client_chunk_size', 65536)) self.error_suppression_interval = \ int(conf.get('error_suppression_interval', 60)) self.error_suppression_limit = \ int(conf.get('error_suppression_limit', 10)) self.deny_host_headers = [ host.strip() for host in conf.get('deny_host_headers', '').split(',') if host.strip()] self.rate_limit_after_segment = \ int(conf.get('rate_limit_after_segment', 10)) self.rate_limit_segments_per_sec = \ int(conf.get('rate_limit_segments_per_sec', 1)) def get_controller(self, path): """ Get the controller to handle a request. :param path: path from request :returns: tuple of (controller class, path dictionary) :raises: ValueError (thrown by split_path) if given invalid path """ version, case, evidence, obj = split_path(path, 1, 4, True) d = dict(version=version, case_name=case, evidence_name=evidence, obj_name=obj) if obj and evidence and case: return ObjectController, d elif evidence and case: return ContainerController, d elif case and not container and not obj: return CaseController, d return None, d def __call__(self, env, start_response): """ WSGI entry point. Wraps env in swob.Request object and passes it down. 
:param env: WSGI environment dictionary :param start_response: WSGI callable """ try: req = Request(env) return self.handle_request(req)(env, start_response) except UnicodeError: err = HTTPPreconditionFailed( request=req, body='Invalid UTF8 or contains NULL') return err(env, start_response) except (Exception, Timeout): start_response('500 Server Error', [('Content-Type', 'text/plain')]) return ['Internal server error.\n'] def handle_request(self, req): """ Entry point for api server. Should return a WSGI-style callable (such as swob.Response). :param req: swob.Request object """ try: self.logger.set_statsd_prefix('api-server') if req.content_length and req.content_length < 0: self.logger.increment('errors') return HTTPBadRequest(request=req, body='Invalid Content-Length') try: if not check_utf8(req.path_info): self.logger.increment('errors') return HTTPPreconditionFailed( request=req, body='Invalid UTF8 or contains NULL') except UnicodeError: self.logger.increment('errors') return HTTPPreconditionFailed( request=req, body='Invalid UTF8 or contains NULL') try: controller, path_parts = self.get_controller(req.path) p = req.path_info if isinstance(p, unicode): p = p.encode('utf-8') except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) if not controller: self.logger.increment('errors') return HTTPPreconditionFailed(request=req, body='Bad URL') if self.deny_host_headers and \ req.host.split(':')[0] in self.deny_host_headers: return HTTPForbidden(request=req, body='Invalid host header') self.logger.set_statsd_prefix('api-server.' + controller.server_type.lower()) controller = controller(self, **path_parts) if 'gate.trans_id' not in req.environ: # if this wasn't set by an earlier middleware, set it now trans_id = 'tx' + uuid.uuid4().hex req.environ['gate.trans_id'] = trans_id self.logger.txn_id = trans_id req.headers['x-trans-id'] = req.environ['gate.trans_id'] controller.trans_id = req.environ['gate.trans_id'] self.logger.client_ip = get_remote_client(req) try: handler = getattr(controller, req.method) getattr(handler, 'publicly_accessible') except AttributeError: allowed_methods = getattr(controller, 'allowed_methods', set()) return HTTPMethodNotAllowed( request=req, headers={'Allow': ', '.join(allowed_methods)}) if path_parts['version']: req.path_info_pop() if 'gate.authorize' in req.environ: # We call authorize before the handler, always. If authorized, # we remove the gate.authorize hook so isn't ever called # again. If not authorized, we return the denial unless the # controller's method indicates it'd like to gather more # information and try again later. resp = req.environ['gate.authorize'](req) if not resp: # No resp means authorized, no delayed recheck required. del req.environ['gate.authorize'] else: # Response indicates denial, but we might delay the denial # and recheck later. If not delayed, return the error now. if not getattr(handler, 'delay_denial', None): return resp # Save off original request method (GET, POST, etc.) in case it # gets mutated during handling. This way logging can display the # method the client actually sent. req.environ['gate.orig_req_method'] = req.method return handler(req) except (Exception, Timeout): self.logger.exception(_('ERROR Unhandled exception in request')) return HTTPServerError(request=req) def app_factory(global_conf, **local_conf): """paste.deploy app factory for creating WSGI proxy apps.""" conf = global_conf.copy() conf.update(local_conf) return Application(conf)
senha = int(input('Jogador 1 digite um valor entre 0 a 100 como senha:\n'))
tentativas = 5

if senha >= 0 and senha <= 100:
    for i in range(tentativas):
        senha2 = int(input(f'Qual é a senha jogador 2 ? Você tem {tentativas} chances!\n'))
        if senha2 == senha:
            print('Acesso permitido!')
            exit()
        elif senha2 > senha:
            print('Voce digitou um numero maior que a senha!')
        elif senha2 < senha:
            print('Voce digitou um numero menor que a senha!')
        tentativas -= 1
    print('Tentativas esgotadas!')
else:
    print('Digitos fora do intervalo permitido!')
import os
import sys

import cv2 as cv

sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from pose_estimator import PoseEstimator

# test run
SAMPLES_DIR = os.environ['ROOT_DIR'] + "/src/flask_app/static/images/samples"
sample_file = SAMPLES_DIR + "/profile_images/sample-profile-image1.jpg"
img = cv.imread(sample_file)
PoseEstimator(img).visualize_pose()
""""Interface for exercise statistics.""" import datetime import practicer_flask.user_exercise_stats.history_postgres as exercise_history import practicer_flask.user_exercise_stats.streak import practicer_flask.user_exercise_stats.experience import practicer_flask.user_exercise_stats.progress as exercise_progress history_db = exercise_history streak_db = practicer_flask.user_exercise_stats.streak experience_db = practicer_flask.user_exercise_stats.experience def progress(user): progress_data = dict() experience_data = experience(user) for exercise in experience_data.keys(): progress_data[exercise] = exercise_progress.experience_to_progress(experience_data[exercise]) return progress_data def experience(user): return experience_db.experience(user=user) def increase_experience(user, exercise): experience_db.increment_experience(user, exercise) def history(user): return history_db.exercieses(user=user) def add_exercise_to_history(user, exercise): date = datetime.date.today() history_db.add_exercise(user, date, exercise["uuid"]) def streak(user): return streak_db.user_streak(user=user) def increase_streak(user): streak_db.update_streak(user=user) if __name__ == "__main__": print(progress(user=2))
import gzip
import json
import os

import requests

from helpers import get_movie_file, get_response


def main():
    try:
        movie_file = get_movie_file()
    except FileNotFoundError:
        print('File not selected, exiting')
        return -1

    response = get_response(movie_file)
    if 'useragent is not valid' in response.text:
        print(response.text)
        return -1

    parsed = json.loads(response.content)
    if not parsed:
        print('Could not find subtitle')
        return -1

    print('Movie Found: %s' % parsed[0]['MovieName'])
    sub_response = requests.get(parsed[0]['SubDownloadLink'])
    print('Subtitle Found: %s' % parsed[0]['SubFileName'])

    movie_dir = os.path.dirname(movie_file)
    sub_name = parsed[0]['SubFileName']
    with open(os.path.join(movie_dir, sub_name), 'wb') as fsub:
        fsub.write(gzip.decompress(sub_response.content))
    print('Subtitle extracted to the movie folder')
    return 0


if __name__ == "__main__":
    if main() == 0:
        print('Exiting...')
__author__ = 'Sujit Mandal'
# Date : 30-08-2020
import pandas as pd
import urllib.request

'''
# Author : Sujit Mandal
Github : https://github.com/sujitmandal
My Package : https://pypi.org/project/images-into-array/
My Package : https://pypi.org/project/scrape-search-engine/
LinkedIn : https://www.linkedin.com/in/sujit-mandal-91215013a/
Facebook : https://www.facebook.com/sujit.mandal.33671748
Twitter : https://twitter.com/mandalsujit37
'''

URL_PATH = ('')    # CSV file that contains the image URLs
IMAGE_PATH = ('')  # Directory where downloaded images are saved


def _download_all(URL_PATH, IMAGE_PATH, extension):
    """Read image URLs from the CSV and save each one as image<N>.<extension>."""
    URLS = pd.read_csv(URL_PATH)
    url = []
    for i in enumerate(URLS.values):
        links = i[1][0]
        url.append(links)

    for j in range(len(url)):
        fileName = ('image{}.{}'.format(j, extension))
        imagePath = ('{}{}'.format(IMAGE_PATH, fileName))
        urllib.request.urlretrieve(url[j], imagePath)
        print('{} saved.'.format(fileName))


def url_jpg(URL_PATH, IMAGE_PATH):
    _download_all(URL_PATH, IMAGE_PATH, 'jpg')


def url_jpeg(URL_PATH, IMAGE_PATH):
    _download_all(URL_PATH, IMAGE_PATH, 'jpeg')


def url_png(URL_PATH, IMAGE_PATH):
    _download_all(URL_PATH, IMAGE_PATH, 'png')


def url_bmp(URL_PATH, IMAGE_PATH):
    _download_all(URL_PATH, IMAGE_PATH, 'bmp')


def url_gif(URL_PATH, IMAGE_PATH):
    _download_all(URL_PATH, IMAGE_PATH, 'gif')


if __name__ == '__main__':
    url_jpg(URL_PATH, IMAGE_PATH)
    url_jpeg(URL_PATH, IMAGE_PATH)
    url_png(URL_PATH, IMAGE_PATH)
    url_bmp(URL_PATH, IMAGE_PATH)
    url_gif(URL_PATH, IMAGE_PATH)
""" Book: The Essentials of Computer Architecture. Chapter: 4 Problem: Write a Computer Program that measures the difference in execution times between the integer division and floating point division. Execute the operation 100,000 times and compare the difference in the running time. Interesting Observation: * Integer Division is taking more time. """ import timeit num1 = pow(2,64) num2 = num1 * 2 stmt = "%d/%d" % (num2,num1) print('Test:', stmt) t1 = timeit.Timer(stmt) m1 = (100000 * t1.timeit(100000) / 100000) print('Integer Division takes: %f usecs/loop' % m1) num2 = num1 * 2.0 stmt = "%f/%f" % (num2, num1) print('Test:', stmt) t2 = timeit.Timer(stmt) m2 = (100000 * t2.timeit(100000) / 100000) print('Floating point Division takes: %f usecs/loop' % m2) print('The difference is %s usecs' % (m2-m1))
# While loops
i = 1
while i < 8:  # While i is less than 8, loop through this code
    if i == 6:
        break  # Tells python to stop the loop
    print(i)
    i += 1

x = 0
while x < 12:
    x += 2
    if x == 8:
        print("Skips 8")
        continue  # Skips when x is 8
    print("X: " + str(x))
else:  # When the loop is done do this code
    print("x is now not less than 12")
import argparse
from abc import ABC, abstractclassmethod
from argparse import RawTextHelpFormatter

from asreview import __version__


class BaseEntryPoint(ABC):
    """Base class for defining entry points."""
    description = "Base Entry point."
    extension_name = "asreview"
    version = __version__

    @abstractclassmethod
    def execute(self, argv):
        """Perform the functionality of the entry point.

        Arguments
        ---------
        argv: list
            Argument list, with the entry point and program removed.
            For example, if `asreview plot X` is executed, then argv == ['X'].
        """
        raise NotImplementedError

    def format(self, entry_name="?"):
        """Create a short formatted description of the entry point.

        Arguments
        ---------
        entry_name: str
            Name of the entry point. For example 'plot' in `asreview plot X`
        """
        description = self.description
        version = getattr(self, "version", "?")
        extension_name = getattr(self, "extension_name", "?")

        display_name = f"{entry_name} [{extension_name}-{version}]"

        return f"{display_name}\n {description}"


def _base_parser(prog=None, description=None):
    """Argument parser for simulate.

    Parameters
    ----------
    prog : str
        The program name. For example 'asreview'.

    Returns
    -------
    argparse.ArgumentParser
        Configured argparser.
    """
    # parse arguments if available
    parser = argparse.ArgumentParser(
        prog=prog,
        description=description,
        formatter_class=RawTextHelpFormatter
    )
    parser.add_argument(
        "--embedding",
        type=str,
        default=None,
        dest='embedding_fp',
        help="File path of embedding matrix. Required for LSTM models."
    )
    parser.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Configuration file with model settings "
             "and parameter values."
    )
    parser.add_argument(
        "--seed",
        default=None,
        type=int,
        help="Seed for the model (classifiers, balance "
             "strategies, feature extraction techniques, and query "
             "strategies). Use an integer between 0 and 2^32 - 1."
    )
    return parser
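# A minimal sketch of how a concrete entry point could subclass BaseEntryPoint above.
# The class name, extension name and printed message are hypothetical, not part of
# asreview itself:
class PlotEntryPoint(BaseEntryPoint):
    description = "Plotting functionality for ASReview."
    extension_name = "asreview-example"
    version = "0.1.0"

    def execute(self, argv):
        # Reuse the shared parser so common options like --seed stay consistent.
        parser = _base_parser(prog="asreview plot",
                              description=self.description)
        args = parser.parse_args(argv)
        print(f"would plot with seed={args.seed}")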
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, QThread, QRunnable
import time
import traceback, sys

from controllers.WSignals import WSignals


class fakeElevator(QRunnable):
    '''
    Worker thread

    Inherits from QRunnable to handle worker thread setup, signals and wrap-up.

    :param callback: The function callback to run on this worker thread. Supplied args and
                     kwargs will be passed through to the runner.
    :type callback: function
    :param args: Arguments to pass to the callback function
    :param kwargs: Keywords to pass to the callback function
    '''
    id = None
    round = 0

    def __init__(self, id, round, *args, **kwargs):
        super(fakeElevator, self).__init__()

        # Store constructor arguments (re-used for processing)
        self.id = id
        self.round = round
        self.args = args
        self.kwargs = kwargs
        self.signals = WSignals()

        # Add the callback to our kwargs
        self.kwargs['progress_callback'] = self.signals.progress

    def fnRun(self, id, round, progress_callback):
        for i in range(1, self.round):
            if i < self.round // 2:
                progress_callback.emit(int(self.id), int(1))
            else:
                progress_callback.emit(int(self.id), int(-1))
            time.sleep(1)

    @pyqtSlot()
    def run(self):
        '''
        Initialise the runner function with passed args, kwargs.
        '''
        # Retrieve args/kwargs here; and fire processing using them
        try:
            result = self.fnRun(self.id, self.round, **self.kwargs)
        except:
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        else:
            self.signals.result.emit(result)  # Return the result of the processing
        finally:
            self.signals.finished.emit()  # Done

    # id = None
    # signal = pyqtSignal(int, int)
    # round = 0
    # def __del__(self):
    #     self.wait()
    # def __init__(self, id, round, signal):
    #     QRunnable.__init__(self)
    #     self.id = id
    #     self.round = round
    #     self.signal = signal
    # def run(self):
    #     for i in range(1, self.round):
    #         if i < self.round//2:
    #             self.signal.emit(int(self.id), int(1))
    #         else:
    #             self.signal.emit(int(self.id), int(-1))
    #         time.sleep(1)
from hw.maria_saganovich.lesson6_hw.lvl11_relations_bt_2_sets import (
    func11_relation_bt_2_sets,
)


def test_func11_relation_bt_2_sets() -> None:
    assert func11_relation_bt_2_sets({1, 2}, {1, 3}) == {  # noqa: JS101
        "data": {
            "a&b": {1},
            "a|b": {1, 2, 3},
            "a-b": {2},
            "b-a": {3},
            "|a-b|": {2, 3},
            "a in b": False,
            "b in a": False,
        }
    }

    assert func11_relation_bt_2_sets({1, 2}, {1, 2, 3}) == {  # noqa: JS101
        "data": {
            "a&b": {1, 2},
            "a|b": {1, 2, 3},
            "a-b": set(),
            "b-a": {3},
            "|a-b|": {3},
            "a in b": True,
            "b in a": False,
        }
    }

    assert func11_relation_bt_2_sets({1, 3}, {1, 3}) == {  # noqa: JS101
        "data": {
            "a&b": {1, 3},
            "a|b": {1, 3},
            "a-b": set(),
            "b-a": set(),
            "|a-b|": set(),
            "a in b": True,
            "b in a": True,
        }
    }

    assert func11_relation_bt_2_sets({1}, {""}) == {  # noqa: JS101
        "data": {
            "a&b": set(),
            "a|b": {"", 1},
            "a-b": {1},
            "b-a": {""},
            "|a-b|": {"", 1},
            "a in b": False,
            "b in a": False,
        }
    }

    assert func11_relation_bt_2_sets([], {""}) == {  # noqa: JS101
        "errors": ["arg1 should be set"]
    }

    assert func11_relation_bt_2_sets({""}, []) == {  # noqa: JS101
        "errors": ["arg2 should be set"]
    }
# -*- coding: utf-8 -*-
import sys
from pathlib import Path

import numpy as np
import pendulum

from alg_complexity import datagen
from alg_complexity.trees import AVLTree
from alg_complexity.utils import class_execution_time, ClassExecTime


if __name__ == '__main__':

    now = pendulum.now()
    now_str = now.to_datetime_string()
    spath = Path('results', now_str)
    spath.mkdir(parents=True)

    cfg = {
        'n_min': 10**1,
        'n_max': 10**4,
        'n_measures': 20,
        'n_repeats': 2,
        'n_number': 5
    }

    sys.setrecursionlimit(cfg['n_max']*10)

    setups = [
        # ('random', lambda n: datagen.unique_integers(n, 0, 10000)),
        ('increasing', lambda n: datagen.range_n(n)),
        ('decreasing', lambda n: datagen.range_n_inv(n)),
    ]

    algorithms = [
        ('avl-tree-rec', {'method': 'search', 'method_kwargs': {'mode': 'recursive'}}),
        ('avl-tree-ite', {'method': 'search', 'method_kwargs': {'mode': 'iterative'}})
    ]

    for setup in setups:
        # setup_savename = '{setup[0]}-plot-{now}'.format(setup=setup, now=now_str)
        # setup_savename = str(spath.joinpath(setup_savename))

        class_exec = ClassExecTime(AVLTree, setup[1])
        ns, exec_times = class_exec.exec_time(-10, algorithms, **cfg)

        savename = '{setup[0]}-res-{now}'.format(setup=setup, now=now_str)
        savename = str(spath.joinpath(savename))
        np.save(savename.replace('-res', '-times'), ns, allow_pickle=False)
        np.save(savename, exec_times, allow_pickle=False)
        np.save(savename.replace('-res', '-algs'),
                np.array([alg[0] for alg in algorithms]),
                allow_pickle=False)

        # for alg in algorithms:
        #     print('Running setup={setup[0]} -- alg={alg[0]}'.format(setup=setup, alg=alg))
        #     savename = '{setup[0]}-{alg[0]}-res-{now}'.format(setup=setup, alg=alg, now=now_str)
        #     savename = str(spath.joinpath(savename))
        #     ns, exec_times = class_exec.exec_time(-10, algorithms, **cfg)
        #     np.save(savename.replace('-res', '-times'), ns, allow_pickle=False)
        #     np.save(savename, exec_times, allow_pickle=False)
# Generated by Django 2.2.12 on 2020-05-20 19:41 from django.db import migrations, models import django.db.models.deletion from contentPages.models import AllResourcesPage, AssetTypePage, ResourceItemPage def create_new_draft_revisions(apps, schema_editor): resource_pages = AllResourcesPage.objects.filter(live=False) for resource_page in resource_pages: resource_page.save_revision() asset_type_pages = AssetTypePage.objects.filter(live=False) for asset_type_page in asset_type_pages: asset_type_page.save_revision() resource_items = ResourceItemPage.objects.filter(live=False) for resource_item in resource_items: resource_item.save_revision() class Migration(migrations.Migration): dependencies = [ ('contentPages', '0039_auto_20200520_1941'), ] operations = [ migrations.RunPython(create_new_draft_revisions), ]
from typing import Any, Dict

from utils.globalvars import METADATA_FILE

import json

class Metadata:
    """This class manages metadata, which stores hardware (such as lna or receiver type), software
       (such as recipe used) and run-time parameters (such as frequency or name of the satellite).
       This metadata is locally stored in a JSON file. Some fields of that JSON file will be
       overwritten (e.g. frequency and name of the sat being received), but others will be left
       intact. The overall idea is that the station owner can put any additional information there
       and it will be uploaded when observations are reported. This flexible approach allows users
       to specify whatever they feel is important about their station - antenna orientation, type
       and length of the cables, etc.

       The file is stored in ~/.config/svarog/metadata.json. If the file is missing, it is created
       on the first use."""

    filename = METADATA_FILE
    storage = {}  # Stores the key/value pairs

    def __init__(self, filename = METADATA_FILE):
        self.filename = filename
        self.loadFile()

    def loadFile(self):
        """Loads file from disk. The filename was specified in constructor. The content is parsed
           and loaded into self.storage"""
        try:
            with open(self.filename, 'r') as myfile:
                data = myfile.read()
            self.storage = json.loads(data)
        except (OSError, ValueError):
            # Missing or malformed file - recreate it with defaults
            self.createFile()

    def clear(self):
        self.storage = {}

    def createFile(self):
        """Creates metadata file, trying to guess as many defaults as possible."""
        self.clear()
        self.addDefaults()
        self.writeFile()

    def writeFile(self):
        """Writes content of the metadata to disk."""
        with open(self.filename, 'w') as outfile:
            outfile.write(self.getString())

    def getAll(self) -> Dict:
        """Returns all metadata as dictionary"""
        return self.storage

    def getString(self) -> str:
        """Returns all metadata as a string"""
        return json.dumps(self.storage, indent = 4)

    def get(self, key: str) -> Any:
        """Returns the parameter or empty string if missing"""
        return self.storage[key] if key in self.storage.keys() else ""

    def addDefaults(self):
        self.set('antenna', 'unknown')
        self.set('antenna-type', 'unknown')
        self.set('receiver', 'RTL-SDR v3')
        self.set('lna', 'none')
        self.set('filter', 'none')

    def set(self, key: str, value: Any):
        self.storage[key] = value

    def delete(self, key: str):
        if key in self.storage:
            self.storage.pop(key, None)
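# A short usage sketch (the path below is only an illustration; by default the class
# reads and writes METADATA_FILE under ~/.config/svarog/).
if __name__ == '__main__':
    meta = Metadata('/tmp/example-metadata.json')  # hypothetical location
    meta.set('antenna', 'V-dipole')
    meta.set('lna', 'wideband LNA')
    meta.writeFile()
    print(meta.getString())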
import csv import sys nargs=len(sys.argv) ofileName='mergedDataset.csv' with open(ofileName, "wb") as f: o=csv.writer(f,quoting=csv.QUOTE_ALL) o.writerow(["IssueTitle","IssueDescription","Label","PrTitle","PrDescription"]) for i in range(1,nargs): with open(sys.argv[i], "rb") as g: fi=csv.reader(g,quoting=csv.QUOTE_ALL) o.writerows(fi)
# -*- coding: utf-8 -*- ''' Created on 2016-10-20 @author: hustcc ''' # for sqlite # DATABASE_URI = 'sqlite:///git_webhook.db' # for mysql DATABASE_URI = 'mysql://root:root@mysql/git_webhook' CELERY_BROKER_URL = 'redis://:@redis:6379/0' CELERY_RESULT_BACKEND = 'redis://:@redis:6379/0' GITHUB_CLIENT_ID = 'b6e751cc48d664240467' GITHUB_CLIENT_SECRET = '6a9e0cbeee1bf89a1e1a25958f35b9dc6b36c996'
#!/usr/bin/env python # -*- coding: utf-8 -*- # # aire.py # # Copyright 2010 Javier Rovegno Campos <tatadeluxe<at>gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. # """ Script para saber calida del aire Santiago de Chile """ import urllib import re def extract_source(): url = "http://www.seremisaludrm.cl/sitio/pag/aire/indexjs3aireindices-prueba.asp" sock = urllib.urlopen(url) htmlSource = sock.read() sock.close() return htmlSource def extract_data(htmlSource): source = htmlSource[6644:12988] # Tabla con informacion soup_data = re.findall(r'(?:<span\s.*">)(.*)(?:</span>)',source) j = 0 row = [] for element in soup_data: if j == 0: element = element.replace('&nbsp;',' ') if j == 2: element = element.replace('<br>',' ') element = element.replace(' ','') if j == 3: element = element.replace('<br>',' ') element = element.replace(' ','') row.append(element) j = 0 aux = row row = [] yield aux else: row.append(element) j += 1 def extract_date(htmlSource): source = htmlSource[5974:5974+100] # Informacion return re.findall(r'\d+/\d+/\d+',htmlSource[5974:5974+100])[0] def extract_time(htmlSource): source = htmlSource[5974:5974+100] # Informacion return re.findall(r'\d+:\d+',htmlSource[5974:5974+100])[0] if __name__ == '__main__': import doctest doctest.testmod() htmlSource = extract_source() date = extract_date(htmlSource) time = extract_time(htmlSource) print 'Calidad del aire en Santiago de Chile %s a las %s\n'%(date, time) data = extract_data(htmlSource) for i in data: print '\t\t'.join(i)
from rest_framework import viewsets, filters from django_filters.rest_framework import DjangoFilterBackend from src.Models import Squad class SquadsViewSet(viewsets.ModelViewSet): queryset = Squad.SquadModel.objects.all() serializer_class = Squad.SquadSerializer filter_backends = (filters.SearchFilter, DjangoFilterBackend) search_fields = ['=tribe__id']
#! /usr/bin/env python # # Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests for google.protobuf.descriptor_database.""" __author__ = '[email protected] (Matt Toia)' try: import unittest2 as unittest #PY26 except ImportError: import unittest import warnings from google.protobuf import unittest_pb2 from google.protobuf import descriptor_pb2 from google.protobuf.internal import factory_test2_pb2 from google.protobuf.internal import no_package_pb2 from google.protobuf import descriptor_database class DescriptorDatabaseTest(unittest.TestCase): def testAdd(self): db = descriptor_database.DescriptorDatabase() file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString( factory_test2_pb2.DESCRIPTOR.serialized_pb) file_desc_proto2 = descriptor_pb2.FileDescriptorProto.FromString( no_package_pb2.DESCRIPTOR.serialized_pb) db.Add(file_desc_proto) db.Add(file_desc_proto2) self.assertEqual(file_desc_proto, db.FindFileByName( 'google/protobuf/internal/factory_test2.proto')) # Can find message type. self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.Factory2Message')) # Can find nested message type. self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.Factory2Message.NestedFactory2Message')) # Can find enum type. self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.Factory2Enum')) # Can find nested enum type. self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum')) self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.MessageWithNestedEnumOnly.NestedEnum')) # Can find field. self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.Factory2Message.list_field')) # Can find enum value. 
self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.Factory2Enum.FACTORY_2_VALUE_0')) self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.FACTORY_2_VALUE_0')) self.assertEqual(file_desc_proto2, db.FindFileContainingSymbol( '.NO_PACKAGE_VALUE_0')) # Can find top level extension. self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.another_field')) # Can find nested extension inside a message. self.assertEqual(file_desc_proto, db.FindFileContainingSymbol( 'google.protobuf.python.internal.Factory2Message.one_more_field')) # Can find service. file_desc_proto2 = descriptor_pb2.FileDescriptorProto.FromString( unittest_pb2.DESCRIPTOR.serialized_pb) db.Add(file_desc_proto2) self.assertEqual(file_desc_proto2, db.FindFileContainingSymbol( 'protobuf_unittest.TestService')) # Non-existent field under a valid top level symbol can also be # found. The behavior is the same with protobuf C++. self.assertEqual(file_desc_proto2, db.FindFileContainingSymbol( 'protobuf_unittest.TestAllTypes.none_field')) self.assertRaises(KeyError, db.FindFileContainingSymbol, 'protobuf_unittest.NoneMessage') def testConflictRegister(self): db = descriptor_database.DescriptorDatabase() unittest_fd = descriptor_pb2.FileDescriptorProto.FromString( unittest_pb2.DESCRIPTOR.serialized_pb) db.Add(unittest_fd) conflict_fd = descriptor_pb2.FileDescriptorProto.FromString( unittest_pb2.DESCRIPTOR.serialized_pb) conflict_fd.name = 'other_file' with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter('always') db.Add(conflict_fd) self.assertTrue(len(w)) self.assertIs(w[0].category, RuntimeWarning) self.assertIn('Conflict register for file "other_file": ', str(w[0].message)) self.assertIn('already defined in file ' '"google/protobuf/unittest.proto"', str(w[0].message)) if __name__ == '__main__': unittest.main()
def render_graph_DefaultRenderGraph(): g = RenderGraph("DefaultRenderGraph") loadRenderPassLibrary("AccumulatePass.dll") loadRenderPassLibrary("GBuffer.dll") loadRenderPassLibrary("OptixDenoiser.dll") loadRenderPassLibrary("MegakernelPathTracer.dll") loadRenderPassLibrary("ToneMapper.dll") GBufferRT = createPass("GBufferRT", {'forceCullMode': False, 'cull': CullMode.CullBack, 'samplePattern': SamplePattern.Stratified, 'sampleCount': 16}) g.addPass(GBufferRT, "GBufferRT") ColorAccumulatePass = createPass("AccumulatePass", {'enabled': True}) g.addPass(ColorAccumulatePass, "ColorAccumulatePass") AlbedoAccumulatePass = createPass("AccumulatePass", {'enabled': True}) g.addPass(AlbedoAccumulatePass, "AlbedoAccumulatePass") NormalAccumulatePass = createPass("AccumulatePass", {'enabled': True}) g.addPass(NormalAccumulatePass, "NormalAccumulatePass") ToneMappingPass = createPass("ToneMapper", {'autoExposure': False, 'exposureCompensation': 0.0}) g.addPass(ToneMappingPass, "ToneMappingPass") MegakernelPathTracer = createPass("MegakernelPathTracer", {'params': PathTracerParams(useVBuffer=0)}) g.addPass(MegakernelPathTracer, "MegakernelPathTracer") OptixDenoiser = createPass("OptixDenoiser") g.addPass(OptixDenoiser, "OptixDenoiser") g.addEdge("GBufferRT.matlExtra", "MegakernelPathTracer.mtlParams") g.addEdge("GBufferRT.posW", "MegakernelPathTracer.posW") g.addEdge("GBufferRT.normW", "MegakernelPathTracer.normalW") g.addEdge("GBufferRT.tangentW", "MegakernelPathTracer.tangentW") g.addEdge("GBufferRT.faceNormalW", "MegakernelPathTracer.faceNormalW") g.addEdge("GBufferRT.viewW", "MegakernelPathTracer.viewW") g.addEdge("GBufferRT.diffuseOpacity", "MegakernelPathTracer.mtlDiffOpacity") g.addEdge("GBufferRT.specRough", "MegakernelPathTracer.mtlSpecRough") g.addEdge("GBufferRT.emissive", "MegakernelPathTracer.mtlEmissive") g.addEdge("MegakernelPathTracer.color", "ColorAccumulatePass.input") g.addEdge("ColorAccumulatePass.output", "ToneMappingPass.src") g.addEdge("MegakernelPathTracer.albedo", "AlbedoAccumulatePass.input") g.addEdge("GBufferRT.normW", "NormalAccumulatePass.input") g.addEdge("ToneMappingPass.dst", "OptixDenoiser.color") g.addEdge("AlbedoAccumulatePass.output", "OptixDenoiser.albedo") g.addEdge("NormalAccumulatePass.output", "OptixDenoiser.normal") g.markOutput("OptixDenoiser.output") return g DefaultRenderGraph = render_graph_DefaultRenderGraph() try: m.addGraph(DefaultRenderGraph) except NameError: None
frase = str(input('Enter a phrase: ').strip().upper())
palavras = frase.split()
juncao = ''.join(palavras)

# Alternative: build the reversed string manually instead of slicing
# inverso = ''
# for letra in range(len(juncao) - 1, -1, -1):
#     inverso += juncao[letra]

inverso = juncao[::-1]
if inverso == juncao:
    print('It IS a palindrome: {} and {}'.format(juncao, inverso))
else:
    print('It is NOT a palindrome: {} and {}'.format(juncao, inverso))
import re
import sys
from io import BytesIO
from pathlib import Path
from random import randbytes
from subprocess import PIPE, Popen
from urllib.request import urlopen
from http import HTTPStatus

import pytest

from limit_reader import LimitReader


def new_reader(size, limit):
    data = randbytes(size)
    io = BytesIO(data)
    return data, LimitReader(io, limit)


test_cases = [
    # n, limit, read_size
    (117, 118, None),
    (73, 70, None),
    (37, 37, None),
    (100, 10, 3),
    (100, 10, 32),
    (100, 0, 32),
    (100, 100, 0),
]


@pytest.mark.parametrize('n, limit, read_size', test_cases)
def test_limit(n, limit, read_size):
    data = randbytes(n)
    io = BytesIO(data)
    rdr = LimitReader(io, limit)
    out = rdr.read(read_size)
    if read_size is None:
        end = limit
    else:
        end = min(limit, read_size)
    assert out == data[:end]


def test_attrs():
    io = BytesIO()
    rdr = LimitReader(io, 100)
    rdr.close()  # should not raise

    with pytest.raises(AttributeError):
        rdr.no_such_method


def test_validate():
    with pytest.raises(TypeError):
        LimitReader('oops')


def test_request(httpd_port):
    readme = Path('README.md')
    file_size = readme.stat().st_size
    resp = urlopen(f'http://localhost:{httpd_port}/{readme.name}')
    read_size = file_size - 42
    r = LimitReader(resp, read_size)
    assert r.status == HTTPStatus.OK
    data = r.read()
    assert read_size == len(data)


def test_file():
    readme = Path('README.md')
    file_size = readme.stat().st_size
    read_size = file_size - 7
    with readme.open() as fp:
        r = LimitReader(fp, read_size)
        assert r.name == readme.name
        data = r.read()
        assert read_size == len(data)


@pytest.fixture
def httpd_port():
    # -u for unbuffered stdout, port 0 will pick random free port
    p = Popen([sys.executable, '-u', '-m', 'http.server', '0'], stdout=PIPE)
    line = p.stdout.readline().decode()
    # Serving HTTP on 0.0.0.0 port 42441 (http://0.0.0.0:42441/) ...
    match = re.search(r'port (\d+)', line)
    assert match, f'cannot find port in: {line!r}'
    port = int(match.group(1))
    yield port
    p.kill()
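# The tests above exercise limit_reader.LimitReader, which is not shown in this file.
# A minimal sketch consistent with their expectations (an assumption, not the
# project's actual implementation) could look like this:
class _LimitReaderSketch:
    """Wrap any object with a read() method and cap the total bytes returned."""

    def __init__(self, reader, limit):
        if not hasattr(reader, 'read'):
            raise TypeError(f'{reader!r} does not look like a reader')
        self._reader = reader
        self._remaining = limit

    def read(self, size=None):
        # Clamp the requested size to whatever budget is left
        if size is None or size > self._remaining:
            size = self._remaining
        data = self._reader.read(size)
        self._remaining -= len(data)
        return data

    def __getattr__(self, name):
        # Delegate everything else (status, name, close, ...) to the wrapped reader
        return getattr(self._reader, name)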
def longest_increasing_subsequence(arr):
    # Length of the longest (not necessarily contiguous) increasing subsequence,
    # using the classic O(n^2) dynamic programme: lengths[i] is the length of the
    # longest increasing subsequence that ends at arr[i].
    if not arr:
        return 0
    lengths = [1] * len(arr)
    for i in range(1, len(arr)):
        for j in range(i):
            if arr[j] < arr[i]:
                lengths[i] = max(lengths[i], lengths[j] + 1)
    return max(lengths)


input_sequence = [10, 22, 9, 33, 21, 50, 41, 60]
print(longest_increasing_subsequence(input_sequence))  # 5, e.g. 10, 22, 33, 50, 60
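# A faster O(n log n) variant of the same computation (patience sorting with binary
# search); shown as an additional sketch, assuming strictly increasing subsequences.
from bisect import bisect_left


def longest_increasing_subsequence_fast(arr):
    tails = []  # tails[k] = smallest possible tail of an increasing subsequence of length k + 1
    for value in arr:
        pos = bisect_left(tails, value)
        if pos == len(tails):
            tails.append(value)
        else:
            tails[pos] = value
    return len(tails)


print(longest_increasing_subsequence_fast(input_sequence))  # also 5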
""" parser.http.bsouplxml.html module (imdb.parser.http package). This module adapts the beautifulsoup interface to lxml.html module. Copyright 2008 H. Turgut Uyar <[email protected]> 2008 Davide Alberani <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ import _bsoup as BeautifulSoup def fromstring(html_string): """Return a DOM representation of the string.""" return BeautifulSoup.BeautifulSoup(html_string, convertEntities=BeautifulSoup.BeautifulSoup.HTML_ENTITIES ).findChild(True)
# -*- coding: utf-8 -*- """ @Time : 2021/5/26 10:49 @Author : BarneyQ @File : code_test.py @Software : PyCharm @Description : @Modification : @Author : @Time : @Detail : """ import numpy as np screen = np.zeros((10,10)) # print(screen) # # print(screen[0:5:3,0:5:3]) import pydicom
def inicstr(str_data): d = {} if len(str_data) > 0: lst = str_data.split('/') if len(lst) > 3:
import numpy as np import copy from classtestify import Testify from classcapability import Capability from classtable import Table from class2ndmethod import SecondMethod from class2Randr import SecondRate from class1stRandr import FirstRate r""" INPUT: - ''demands'' -- [K*I] matrix: which user is asking for which file - ''distribution'' -- [I*J] matrix: which file is stored by which sender - ''connection'' -- [J*K] matrix: which sender is connected to which user - ''M'' -- cache size of users """ M = 5 demands = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) distribution = np.array([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) connection = np.array([[1, 1, 1, 0, 0, 0], [1, 0, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1], [0, 0, 1, 0, 1, 1]]) ## ################################################### ##demands = np.array([[0, 0, 1], ## [1, 0, 0], ## [0, 1, 0]]) ##M = 2 ## ##demands = np.array([[1, 0, 0], ## [0, 1, 0], ## [0, 0, 1]]) ## ##distribution = np.array([[1,1,1], ## [1,1,1], ## [1,1,1]]) ## ##connection = np.array([[0,1,1], ## [1,0,1], ## [1,1,0]]) ###################################################### ##M = 1 ## ##demands = np.array([[1,0,0,0], ## [0,1,0,0], ## [0,0,1,0], ## [0,0,0,1]]) ## ##distribution = np.array([[1,0,0], ## [1,0,1], ## [0,1,1], ## [0,1,0]]) ## ##connection = np.array([[1,0,0,1], ## [0,1,1,1], ## [1,1,1,1]]) K = demands.shape[0] I = demands.shape[1] J = distribution.shape[1] t = int(M*K/I) a = Capability(demands, distribution, connection) demands_sender = a.capability_matrix().tolist() b = Table(demands_sender, K, J, M) capability_table = b.table_list() # for the 1st method e = FirstRate(demands_sender, t) rate_pair_1 = e.required_rate() #[R, r] print('1:',rate_pair_1) # for the 2nd method c = SecondMethod(capability_table) track = c.assignment_phase() # or track = c.track d = SecondRate(demands_sender, track, t) rate_pair_2 = d.required_rate() # [R, r] print('2:', rate_pair_2)
# -*- coding: utf-8 -*- from __future__ import unicode_literals from importlib import import_module from django import VERSION as DJANGO_VERSION from django.conf import settings # Do we support set_required and set_disabled? # See GitHub issues 337 and 345 # TODO: Get rid of this after support for Django 1.8 LTS ends DBS3_SET_REQUIRED_SET_DISABLED = DJANGO_VERSION[0] < 2 and DJANGO_VERSION[1] < 10 # Default settings BOOTSTRAP4_DEFAULTS = { 'jquery_url': '//code.jquery.com/jquery-3.1.1.slim.min.js', 'jquery_integrity': 'sha384-A7FZj7v+d/sdmMqp/nOQwliLvUsJfDHW+k9Omg/a/EheAdgtzNs3hpfag6Ed950n', 'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/', 'css_url': None, 'css_integrity': 'sha384-rwoIResjU2yc3z8GV/NPeZWAv56rSmLldC3R/AZzGRnGxQQKnKkoFVhFQhNUwEyJ', 'theme_url': None, 'javascript_url': None, 'javascript_integrity': "sha384-vBWWzlZJ8ea9aCX4pEW3rVHjgjt7zpkNpZk+02D9phzyeVkE+jo0ieGizqPLForn", 'include_jquery': False, 'horizontal_label_class': 'col-md-3', 'horizontal_field_class': 'col-md-9', 'set_placeholder': True, 'required_css_class': '', 'error_css_class': 'has-error', 'success_css_class': 'has-success', 'formset_renderers': { 'default': 'bootstrap4.renderers.FormsetRenderer', }, 'form_renderers': { 'default': 'bootstrap4.renderers.FormRenderer', }, 'field_renderers': { 'default': 'bootstrap4.renderers.FieldRenderer', 'inline': 'bootstrap4.renderers.InlineFieldRenderer', }, } if DBS3_SET_REQUIRED_SET_DISABLED: BOOTSTRAP4_DEFAULTS.update({ 'set_required': True, 'set_disabled': False, }) # Start with a copy of default settings BOOTSTRAP4 = BOOTSTRAP4_DEFAULTS.copy() # Override with user settings from settings.py BOOTSTRAP4.update(getattr(settings, 'BOOTSTRAP4', {})) def get_bootstrap_setting(setting, default=None): """ Read a setting """ return BOOTSTRAP4.get(setting, default) def bootstrap_url(postfix): """ Prefix a relative url with the bootstrap base url """ return get_bootstrap_setting('base_url') + postfix def jquery_url(): """ Return the full url to jQuery file to use """ return get_bootstrap_setting('jquery_url') def jquery_integrity(): """ Return the full url to jQuery file to use """ return get_bootstrap_setting('jquery_integrity') def javascript_url(): """ Return the full url to the Bootstrap JavaScript file """ return get_bootstrap_setting('javascript_url') or \ bootstrap_url('js/bootstrap.min.js') def javascript_url_integrity(): """ Return the full url integrity to the Bootstrap JavaScript file """ return get_bootstrap_setting('javascript_integrity') or \ BOOTSTRAP4_DEFAULTS['javascript_integrity'] def css_url(): """ Return the full url to the Bootstrap CSS file """ return get_bootstrap_setting('css_url') or \ bootstrap_url('css/bootstrap.min.css') def css_url_integrity(): """ Return the full css integrity to the Bootstrap JavaScript file """ return get_bootstrap_setting('css_integrity') or \ BOOTSTRAP4_DEFAULTS['css_integrity'] def theme_url(): """ Return the full url to the theme CSS file """ return get_bootstrap_setting('theme_url') def get_renderer(renderers, **kwargs): layout = kwargs.get('layout', '') path = renderers.get(layout, renderers['default']) mod, cls = path.rsplit(".", 1) return getattr(import_module(mod), cls) def get_formset_renderer(**kwargs): renderers = get_bootstrap_setting('formset_renderers') return get_renderer(renderers, **kwargs) def get_form_renderer(**kwargs): renderers = get_bootstrap_setting('form_renderers') return get_renderer(renderers, **kwargs) def get_field_renderer(**kwargs): renderers = get_bootstrap_setting('field_renderers') 
return get_renderer(renderers, **kwargs)
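# A hedged example of how a project might override these defaults from its own
# settings.py (values below are illustrative only); any key that is not overridden
# falls back to BOOTSTRAP4_DEFAULTS above via BOOTSTRAP4.update(), and nested dicts
# such as 'field_renderers' are replaced wholesale because the update is shallow.
#
# BOOTSTRAP4 = {
#     'include_jquery': True,
#     'horizontal_label_class': 'col-md-2',
#     'horizontal_field_class': 'col-md-10',
#     'field_renderers': {
#         'default': 'bootstrap4.renderers.FieldRenderer',
#         'inline': 'bootstrap4.renderers.InlineFieldRenderer',
#     },
# }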
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # S_ToeplitzMatrix [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_ToeplitzMatrix&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=EigToepStruct). # ## Prepare the environment # + import os import os.path as path import sys sys.path.append(path.abspath('../../functions-legacy')) from numpy import ones, sort, argsort, diagflat, eye from numpy.linalg import eig import matplotlib.pyplot as plt from matplotlib.pyplot import figure, plot plt.style.use('seaborn') from ARPM_utils import save_plot # Inputs n_ = 200 # dimension of the matrix rho = 0.9 # decay factor # - # ## Build Toeplitz matrix t = eye(n_) for n in range(n_ - 1): t = t + rho ** n * (diagflat(ones((n_ - n, 1)), n) + diagflat(ones((n_ - n, 1)), -n)) # ## Perform spectral decomposition Diag_lambda2, e = eig(t) lambda2, index = sort(Diag_lambda2)[::-1], argsort(Diag_lambda2)[::-1] e = e[:, index] # ## Plot first eigenvectors figure() color = [[0, 0.4470, 0.7410], [0.8500, 0.3250, 0.0980],[0.9290, 0.6940, 0.1250]] for n in range(3): h = plot(e[:, n], color=color[n]) plt.grid(True); # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
import sklearn.metrics as skm import matplotlib as mp mp.use('Agg') import matplotlib.pyplot as plt import numpy as np import pandas as pd import pickle from patsy import ModelDesc, EvalFactor, Term, dmatrix from os import linesep, path from sklearn.preprocessing import Imputer from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.metrics import roc_curve, auc import logging logger = logging.getLogger("root") def build_model(data_set, data_split, no_interactions, negative, model, cross_validation, max_snps, output_dir, param_grid={}, model_eval={}): """ Builds a model for the data set :param data_set: The data set (training and testing) :param data_split: The percentage of data that should be used for testing :param no_interactions: If false interactions aren't included in the model :param negative: The negative phenotype label :param model: The model to use for training and testing the data :param cross_validation: The number of folds for k-fold cross validation :param max_snps: The maximum number of SNPs for the model to include :param output_dir: The directory to write the model artifacts in :param param_grid: The parameter matrix for the model :param model_eval: A dictionary of optional model evaluation methods """ model_config = {} # Split the data into testing and training data x = data_set.drop(labels=['phenotype'], axis=1) snp_columns = x.columns.values if (max_snps is not None) and (len(snp_columns) > max_snps): logger.warning('Too many model SNPs ({}, configured max: {}). Dropping extra SNPs.' .format(len(snp_columns), max_snps)) snp_columns = snp_columns[:max_snps] x = x[snp_columns] model_config['snps'] = snp_columns y = data_set['phenotype'] x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=data_split/float(100), random_state=1, stratify=y ) # Convert classifications to 0 and 1 # # Do this after the test train split because while the ratio of each phenotype is the same in the split, different # rows are chosen based on which phenotype is assigned a 0 and 1. Do this after the split means consistent rows # will be selected regardless of which phenotype is assigned as the negative. 
pheno_map = __pheno_to_binary(y_train, y_test, negative) model_config['pheno_map'] = pheno_map # Replace nan values imputer, x_train, x_test = __impute_data(x_train, x_test) model_config['imputer'] = imputer # print data counts __save_data_summary(pheno_map, y_train, y_test, len(snp_columns), output_dir) # Define model model_config['no_interactions'] = no_interactions model_desc = build_model_desc(snp_columns, no_interactions) x_train = dmatrix(model_desc, pd.DataFrame(x_train, columns=snp_columns)) x_test = dmatrix(model_desc, pd.DataFrame(x_test, columns=snp_columns)) # Fit training data to model grid = GridSearchCV(model, param_grid=param_grid, cv=cross_validation, verbose=5) grid.fit(x_train, y_train) best_model = grid.best_estimator_ model_config['model'] = best_model __save_model(model_config, output_dir) logger.info('Best estimator params found during grid search: {}'.format(grid.best_params_)) # Test model y_pred = best_model.predict(x_test) __save_confusion_matrix(y_test, y_pred, output_dir, 'testing_data') __save_confusion_matrix(y_train, best_model.predict(x_train), output_dir, 'training_data') # Optional model stats roc_probs = model_eval.get('roc') if roc_probs: __save_roc(y_test, roc_probs(best_model, x_test), output_dir) features = model_eval.get('features') if features: model_terms = __get_model_term_labels(model_desc) features(best_model, model_terms, output_dir) def __save_confusion_matrix(y_true, y_pred, output_dir, file_suffix): """ Calculates the metrics for the model prediction using a confusion matrix :param y_true: The test data provided as a numpy array :param y_pred: The test predicted by the model as a numpy array :param output_dir: The directory to write the results to :param file_suffix: The suffix for the output file name """ confusion_matrix = skm.confusion_matrix(y_true, y_pred) true_pos = confusion_matrix[1][1] true_neg = confusion_matrix[0][0] false_pos = confusion_matrix[0][1] false_neg = confusion_matrix[1][0] accuracy = float(true_pos + true_neg)/float(true_pos + true_neg + false_neg + false_pos) sensitivity = float(true_pos)/float(true_pos + false_neg) specificity = float(true_neg)/float(true_neg + false_pos) metrics = 'Confusion Matrix Metrics: {} Accuracy: {}{} Sensitivity: {}{} Specificity: {}{} ' \ 'TP: {}{} TN: {}{} FP: {}{} FN: {}{}'\ .format(linesep, np.round(accuracy, 3), linesep, np.round(sensitivity, 3), linesep, np.round(specificity, 3), linesep, np.round(true_pos, 3), linesep, np.round(true_neg, 3), linesep, np.round(false_pos, 3), linesep, np.round(false_neg, 3), linesep) logger.info(metrics) with open(path.join(output_dir, 'confusion_matrix_{}.txt'.format(file_suffix)), 'w') as metrics_file: metrics_file.write(metrics) def __pheno_to_binary(y_train, y_test, negative): """ Converts the phenotype labels to 0 and 1 :param data_set: The feature matrix :param negative: The phenotype label that should be negative :returns The phenotype numeric to string label mapping """ # Identify negative phenotype phenotypes = set(y_train) phenotypes.update(y_test) phenotypes = sorted(phenotypes) if negative is None: negative = phenotypes[0] elif negative not in phenotypes: raise ValueError('{} is an invalid negative phenotype option. 
Must be one of the following: {}' .format(negative, phenotypes)) # Identify positive phenotype phenotypes.remove(negative) positive = phenotypes[0] # Replace the string values with binary values y_train.replace([negative, positive], [0, 1], inplace=True) y_test.replace([negative, positive], [0, 1], inplace=True) return {0: negative, 1: positive} def __impute_data(x_train, x_test): """ Fills in the missing data. nan values will be replaced with the most frequent value for the feature :param x_train: The training data :param x_test: The test data :return: The fitted imputer, modified training and test data. """ imputer = Imputer(missing_values='NaN', strategy='most_frequent', axis=0, copy=True, verbose=1) train_snp_count = x_train.shape[1] x_train = imputer.fit_transform(x_train) if train_snp_count > x_train.shape[1]: raise ValueError('A SNP column was dropped while imputing the training set. ' 'This means the entire feature had no data. Try decreasing the invalid SNP threshold.') test_snp_count = x_test.shape[1] x_test = imputer.transform(x_test) if test_snp_count > x_test.shape[1]: raise ValueError('A SNP column was dropped while imputing the test set. ' 'This means the entire feature had no data. Try decreasing the invalid SNP threshold.') return imputer, x_train, x_test def build_model_desc(snps, no_interactions): """ Creates the model description (formula) :param snps: The selected snp labels :param no_interactions: If false, interactions will not be included in the model :return: The model description """ x_terms = [] for i in range(len(snps)): # Main effects snp_i = EvalFactor(snps[i]) x_terms.append(Term([snp_i])) if not no_interactions: for j in range(i + 1, len(snps)): # Interaction effects snp_j = EvalFactor(snps[j]) x_terms.append(Term([snp_i, snp_j])) return ModelDesc([], x_terms) def __get_model_term_labels(model_desc): term_labels = [] for term in model_desc.rhs_termlist: term_label = ':'.join(exp.code for exp in term.factors) term_labels.append(term_label) return term_labels def __save_feature_importance(model, model_desc, output_dir): """ Saves the most influential model features, sorted. Since all features are scaled to the same range the coefficients can be used to evaluate feature importance. 
    :param model: The fitted model
    :param model_desc: The model description
    :param output_dir: The directory to write the feature importance in
    """
    if hasattr(model, 'coef_'):
        term_labels = []
        for term in model_desc.rhs_termlist:
            term_label = ':'.join(exp.code for exp in term.factors)
            term_labels.append(term_label)

        features = pd.DataFrame({'feature': term_labels, 'coefficient': model.coef_.ravel()})
        features['coef_abs'] = features['coefficient'].abs()
        features = features[features['coef_abs'] > 0]
        features.sort_values(ascending=False, inplace=True, by='coef_abs')

        with open(path.join(output_dir, 'features.csv'), 'w') as f:
            f.write('intercept: {}{}{}'.format(model.intercept_, linesep, linesep))
            features[['feature', 'coefficient']].to_csv(f, index=False)


def __save_model(model_config, output_dir):
    """
    Persists the imputer and model so that it can be used with new data

    :param model_config: The dictionary containing all model objects needed to make predictions on new data
    :param output_dir: The directory to write the imputer and model to
    """
    try:
        with open(path.join(output_dir, 'model_config.pkl'), 'wb') as f:
            f.write(pickle.dumps(model_config))
    except Exception as e:
        logger.info('Cannot save model: {}'.format(e))


def __save_roc(y_true, y_pred, output_dir):
    """
    Creates an ROC curve with AUC for the model

    :param y_true: The actual phenotypes for the test data
    :param y_pred: The predicted phenotypes for the test data
    :param output_dir: The directory to save the ROC curve in
    """
    fpr, tpr, thresholds = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)

    # Plot code referenced from http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
    plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.3f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC')
    plt.legend(loc="lower right")
    plt.savefig(path.join(output_dir, 'roc.png'))
    plt.close()


def __save_data_summary(pheno_map, y_train, y_test, n_snps, output_dir):
    # counts for phenotypes
    n_neg = len(y_train[y_train == 0]) + len(y_test[y_test == 0])
    n_pos = len(y_train[y_train == 1]) + len(y_test[y_test == 1])
    total = n_neg + n_pos

    # percent of phenotypes in data
    p_neg = np.round(float(n_neg) / float(total) * 100, 1)
    p_pos = np.round(float(n_pos) / float(total) * 100, 1)

    # negative and positive labels
    neg = pheno_map[0]
    pos = pheno_map[1]

    # model data counts
    n_train = len(y_train)
    n_test = len(y_test)

    # format summary
    pheno_summary = "-- Data Summary --{}\tNegative ({}):\t{} ({}%){}\tPositive ({}):\t{} ({}%){}\tTOTAL:\t{}{}"\
        .format(linesep, neg, n_neg, p_neg, linesep, pos, n_pos, p_pos, linesep, total, linesep)
    count_summary = "Training Count:\t{}{}Test Count:\t{}{}Number of SNP Features:\t{}{}"\
        .format(n_train, linesep, n_test, linesep, n_snps, linesep)

    logger.info(pheno_summary)
    logger.info(count_summary)

    # write to file
    with open(path.join(output_dir, 'data_summary.txt'), 'w') as summary_file:
        summary_file.write(pheno_summary)
        summary_file.write(count_summary)
        summary_file.close()
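# A small sketch of the optional model_eval hooks accepted by build_model above
# (an assumption: the fitted classifier exposes predict_proba, as sklearn's
# LogisticRegression does). The 'roc' hook must return positive-class probabilities
# for x_test, which __save_roc then feeds into roc_curve.
def _roc_probabilities(model, x_test):
    # Column 1 holds the probability of the positive (label 1) class
    return model.predict_proba(x_test)[:, 1]

# Hypothetical wiring when calling build_model:
# build_model(data_set, data_split=20, no_interactions=True, negative=None,
#             model=LogisticRegression(), cross_validation=5, max_snps=100,
#             output_dir='out', param_grid={'C': [0.1, 1.0]},
#             model_eval={'roc': _roc_probabilities})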
import gc import xgboost as xgb from tqdm import tqdm class BaggedXgboost(object): def __init__(self, n_models, verbose=True): self.n_models = n_models self.verbose = verbose self.models = [] def train(self, params, dtrain, *args, **kwargs): if self.verbose: iterator = tqdm(range(self.n_models)) else: iterator = range(self.n_models) for _ in iterator: self.models.append(xgb.train(params, dtrain, *args, **kwargs)) gc.collect() return self def predict(self, dtest, *args, **kwargs): predictions = self.models[0].predict(dtest, *args, **kwargs) if self.verbose: iterator = tqdm(range(1, self.n_models)) else: iterator = range(1, self.n_models) for i in iterator: predictions += self.models[i].predict(dtest, *args, **kwargs) gc.collect() predictions = predictions / self.n_models return predictions
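# A minimal usage sketch (parameter values are illustrative only). The class simply
# trains n_models boosters with the same arguments and averages their predictions.
if __name__ == '__main__':
    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 5))
    y = (X[:, 0] + rng.normal(scale=0.5, size=200) > 0).astype(int)

    dtrain = xgb.DMatrix(X[:150], label=y[:150])
    dtest = xgb.DMatrix(X[150:])

    params = {'objective': 'binary:logistic', 'max_depth': 3, 'subsample': 0.8}
    bagged = BaggedXgboost(n_models=5).train(params, dtrain, num_boost_round=20)
    print(bagged.predict(dtest)[:5])  # probabilities averaged over the 5 boosters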
"""Temperature Module protocol commands.""" from .set_target_temperature import ( SetTargetTemperature, SetTargetTemperatureCreate, SetTargetTemperatureParams, SetTargetTemperatureResult, SetTargetTemperatureCommandType, ) from .await_temperature import ( AwaitTemperature, AwaitTemperatureCreate, AwaitTemperatureParams, AwaitTemperatureResult, AwaitTemperatureCommandType, ) from .deactivate import ( DeactivateTemperature, DeactivateTemperatureCreate, DeactivateTemperatureParams, DeactivateTemperatureResult, DeactivateTemperatureCommandType, ) __all__ = [ # temperatureModule/setTargetTemperature "SetTargetTemperature", "SetTargetTemperatureCreate", "SetTargetTemperatureParams", "SetTargetTemperatureResult", "SetTargetTemperatureCommandType", # temperatureModule/awaitTemperature "AwaitTemperature", "AwaitTemperatureCreate", "AwaitTemperatureParams", "AwaitTemperatureResult", "AwaitTemperatureCommandType", # temperatureModule/deactivateTemperature "DeactivateTemperature", "DeactivateTemperatureCreate", "DeactivateTemperatureParams", "DeactivateTemperatureResult", "DeactivateTemperatureCommandType", ]
from __future__ import annotations from typing import overload myint = int def sum(x: myint, y: myint) -> myint: """docstring""" return x + y @overload def mult(x: myint, y: myint) -> myint: ... @overload def mult(x: float, y: float) -> float: ... def mult(x, y): """docstring""" return x, y
import argparse import os from highlights import create_highlights, get_multiple_highlights from utils import make_dirs, find_features_layer from rl_baselines_zoo.utils import ALGOS from get_traces import load_agent from environments import Evnironments from agent_comparisons import compare_agents import logging if __name__ == '__main__': # TODO parser args: parser = argparse.ArgumentParser() parser.add_argument('--env', help='environment ID', type=str, default='CartPole-v1') parser.add_argument('-f', '--folder', help='Log folder', type=str, default='trained_agents') parser.add_argument('--algo', help='RL Algorithm', default='ppo2', type=str, required=False, choices=list(ALGOS.keys())) parser.add_argument('-n', '--n-timesteps', help='number of timesteps', default=1000, type=int) parser.add_argument('--n-envs', help='number of environments', default=1, type=int) parser.add_argument('--exp-id', help='Experiment ID (default: -1, no exp folder, 0: latest)', default=-1, type=int) parser.add_argument('--verbose', help='Verbose mode (0: no output, 1: INFO)', default=1, type=int) parser.add_argument('--no-render', action='store_true', default=False, help='Do not render the environment (useful for tests)') parser.add_argument('--deterministic', action='store_true', default=False, help='Use deterministic actions') parser.add_argument('--stochastic', action='store_true', default=False, help='Use stochastic actions (for DDPG/DQN/SAC)') parser.add_argument('--norm-reward', action='store_true', default=False, help='Normalize reward if applicable (trained with VecNormalize)') parser.add_argument('--seed', help='Random generator seed', type=int, default=0) parser.add_argument('--reward-log', help='Where to log reward', default='', type=str) parser.add_argument('--gym-packages', type=str, nargs='+', default=[], help='Additional external Gym environment package modules to import (e.g. gym_minigrid)') args = parser.parse_args() """Model Parameters""" args.env = 'MsPacmanNoFrameskip-v4' # SeaquestNoFrameskip-v4, MsPacmanNoFrameskip-v4 args.algo = 'acktr' # 'a2c', 'ppo2' , 'acktr', 'dqn' args.stats_path = None args.log_dir = None args.hyperparams = {} args.deterministic = False args.stochastic = False args.max_trace_timesteps = 5000 args.verbose = 1 args.n_envs = 1 args.is_atari = True args.no_render = True """Highlights Parameters""" args.summary_traj_budget = 10 args.context_length = 2 * args.summary_traj_budget # must be even number assert args.context_length % 2 == 0, "The context range of a state must be an even number" args.minimum_gap = 10 args.trajectory_importance = "max_min" # avg , max_minus_avg, avg_delta, max_min, single_state args.state_importance = "second" # worst, second args.similarity_limit = 0 # 0 , int(args.context_length / 3) "Agent Comparison Parameters" args.important_state_percentage = 0.1 """Experiment parameters""" args.load_traces = False # args.load_trajectories = False args.random_noop_init = True args.random_noop_range = 40 args.built_in_noop = Evnironments[args.env]["built_in_noop"] # needed for creating a list of random noop starts. 
list must be longer than num of traces args.crop_top = Evnironments[args.env]["crop_top"] args.crop_bottom = Evnironments[args.env]["crop_bottom"] args.single_life_trace = True # trace = single life or full game args.num_traces = 20 assert args.num_traces < args.random_noop_range """Directory Parameters""" args.agents_dir = "rl_baselines_zoo/trained_agents" args.env_dir = os.path.join("output", args.env) args.stt_dir = os.path.join(args.env_dir, "states_traces_trajectories") args.video_dir = os.path.join(args.env_dir, "videos") make_dirs(args.env_dir) make_dirs(args.stt_dir) make_dirs(args.video_dir) args.file_name = str(args.num_traces) + ".pkl" args.traces_file = os.path.join(args.stt_dir, args.algo, "Traces:" + args.file_name) args.state_file = os.path.join(args.stt_dir, args.algo, "States:" + args.file_name) args.trajectories_file = os.path.join(args.stt_dir, args.algo, "Trajectories:" + args.file_name) """Bad Result Experiments""" args.map_action_reduction = False # results are much worse hen True args.rand_step = 0 # not sure if we want this """RUN""" create_highlights(args) """MULTIPLE RUNS""" # get_multiple_highlights(args) """LOADING AN AGENT""" # environment, agent = load_agent(args) """features""" # features_layer = find_features_layer(agent) """LOADING MULTIPLE AGENTS""" compare_agents(args) print()
# Generated by Django 3.0.7 on 2020-10-03 17:33 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('thesis', '0008_auto_20200909_0029'), ] operations = [ migrations.AlterField( model_name='studentgroup', name='teacher', field=models.ForeignKey(blank=True, default=None, limit_choices_to={'is_teacher': True}, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, related_name='studentgroups', to=settings.AUTH_USER_MODEL, verbose_name='Supervisor'), ), ]
import numpy as np from pymc import * model = Model() with model: k = 5 a = constant(np.array([2, 3., 4, 2, 2])) p, p_m1 = model.TransformedVar( 'p', Dirichlet.dist(a, shape=k), simplextransform) c = Categorical('c', p, observed=np.random.randint(0, k, 5)) def run(n=3000): if n == "short": n = 50 with model: step = Slice() trace = sample(n, step) if __name__ == '__main__': run()
import os, sys import bpy from mathutils import Matrix, Vector from PIL import Image from math import radians, sin, cos import numpy as np import random import json import ipdb cur_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.append(cur_dir) from image_utils import obtain_obj_region, obtain_obj_center from render_utils import * # Transform the R and T from numpy array to Matrix def convert_pose_array_to_matrix(R, T): mat = Matrix(R.reshape(3, 3)).to_4x4() mat.col[3][:3] = T return mat # Setup the camera def setup_camera(scene, fx=572, fy=574, cx=325, cy=242): cam = scene.objects['Camera'] width = scene.render.resolution_x height = scene.render.resolution_y cam.data.sensor_height = cam.data.sensor_width * height / width cam.data.lens = (fx + fy) / 2 * cam.data.sensor_width / width cam.data.shift_x = (width / 2 - cx) / width cam.data.shift_y = (cy - height / 2) / width # change to OpenCV camera coordinate system cam.matrix_world = Matrix(((1.0, 0.0, 0.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, -1.0, 0.0), (0.0, 0.0, 0.0, 1.0))) return cam # Add material to object def add_color(obj, color=(1., 0., 0.), shadeless=True): mat = bpy.data.materials.new(name='Material') mat.use_shadeless = shadeless mat.diffuse_color = color if obj.data.materials: obj.data.materials[0] = mat else: obj.data.materials.append(mat) # Add texture map to object def add_texture_map(obj, texture_img): mat = bpy.data.materials.new(name='Material') tex = bpy.data.textures.new('UVMapping', 'IMAGE') tex.image = bpy.data.images.load(texture_img) slot = mat.texture_slots.add() slot.texture = tex if obj.data.materials: obj.data.materials[0] = mat else: obj.data.materials.append(mat) # Import 3D models from .obj files def import_models(model_files, use_defalut_texture=False): models = {} textures = {} repeat_count = {} for i in range(len(model_files)): models[i] = {} model_file = model_files[i] bpy.ops.import_scene.obj(filepath=model_file) model_name = model_file.split('/')[-1].split('.')[0] models[i]['model_name'] = model_name if model_name not in repeat_count.keys(): repeat_count[model_name] = 0 else: repeat_count[model_name] += 1 models[i]['object_name'] = model_name if repeat_count[model_name] == 0 else '{}.{:03d}'.format(model_name, repeat_count[model_name]) if use_defalut_texture: textures[model_name] = model_file.replace('.obj', '.png') return models, textures # Create random rotation matrix def rand_rotation(): # from http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c theta, phi, z = np.random.uniform(size=(3,)) theta = theta * 2.0 * np.pi # Rotation about the pole (Z). phi = phi * 2.0 * np.pi # For direction of pole deflection. z = z * 2.0 # For magnitude of pole deflection. # Compute a vector V used for distributing points over the sphere # via the reflection I - V Transpose(V). This formulation of V # will guarantee that if x[1] and x[2] are uniformly distributed, # the reflected points will be uniform on the sphere. Note that V # has length sqrt(2) to eliminate the 2 in the Householder matrix. r = np.sqrt(z) V = ( np.sin(phi) * r, np.cos(phi) * r, np.sqrt(2.0 - z) ) st = np.sin(theta) ct = np.cos(theta) R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1))) # Construct the rotation matrix ( V Transpose(V) - I ) R. M = (np.outer(V, V) - np.eye(3)).dot(R) return M class RenderMachine: """Creates a python blender render machine. 
model_files: a list containing all the obj files out_dir: where to save the render results table_file: 3D model of the table on which all objects could be placed hide_table: use the table model only when this arg is False texture_dir: directory containing the texture map images bg_dir: directory containing the background images dim_min: the minimum model dimension in mm dim_max: the maximum model dimension in mm grid: the distance between object models on the table rad: lamp radiance to adjust the lightness clip_end: rendering range in mm """ def __init__(self, model_files, out_dir, table_file='Platte.obj', hide_table=False, texture_dir=None, bg_dir=None, dim_min=50, dim_max=150, grid=150, rad=3000, clip_end=2000, fx=572, fy=574, cx=325, cy=242, height=480, width=640): # Setting up the environment remove_obj_lamp_and_mesh(bpy.context) self.scene = bpy.context.scene self.objs = bpy.data.objects self.depthFileOutput = setup_env(self.scene, True, False, height, width, clip_end) self.camera = setup_camera(self.scene, fx, fy, cx, cy) self.lamp = make_lamp(rad) self.rad = rad self.height, self.width = height, width self.fx, self.fy, self.cx, self.cy = fx, fy, cx, cy # Import table model and align it with camera frame bpy.ops.import_scene.obj(filepath=table_file) self.table = bpy.data.objects[table_file.split('.')[0]] self.offset = [0, -grid, grid, -2 * grid, 2 * grid, -3 * grid, 3 * grid] self.hide_table = hide_table # Import 3D models and register dimension range model_files = random.choices(model_files, k=30) if len(model_files) > 30 else model_files self.models, self.textures = import_models(model_files) self.dim_min, self.dim_max = dim_min, dim_max # Read texture maps and the background images self.texture_dir = texture_dir self.textures = os.listdir(texture_dir) self.bg_dir = bg_dir self.bg_imgs = os.listdir(bg_dir) # Output setting self.out_dir = out_dir self.scene.render.image_settings.file_format = 'PNG' self.depthFileOutput.base_path = out_dir self.depthFileOutput.format.file_format = 'OPEN_EXR' # TODO: to modify in order to be complied with T-LESS where multiple objects are present def render_pose_from_annotation(self, idx, R, T): self.table.hide_render = True # Render object masks for i in range(len(self.models)): model = self.models[i]['object_name'] if model in R: self.objs[model].hide_render = False self.objs[model].matrix_world = convert_pose_array_to_matrix(R[model], T[model]) add_color(self.objs[model], color=((i + 1) * 0.01, (i + 1) * 0.01, (i + 1) * 0.01), shadeless=True) else: self.objs[model].hide_render = True self.scene.render.filepath = os.path.join(self.out_dir, '{:04d}_mask'.format(idx)) self.depthFileOutput.file_slots[0].path = '{:04d}_depth_'.format(idx) render_without_output(use_antialiasing=False) # Render textured image and depth map for i in range(len(self.models)): model = self.models[i]['object_name'] if model in R: add_texture_map(self.objs[model], self.textures[model]) self.depthFileOutput.file_slots[0].path = '{:04d}_depth_'.format(idx) self.scene.render.filepath = os.path.join(self.out_dir, '{:04d}_image'.format(idx)) render_without_output(use_antialiasing=True) def render_random_pose(self, annot, start_idx, scene_id, image_id, R, T, ele): self.table.matrix_world = convert_pose_array_to_matrix( R, T + np.array([0, 200 * sin(radians(ele)), 200 * cos(radians(ele))]) ) self.table.scale = 6, 6, 6 self.table.hide_render = self.hide_table # Randomize the lamp energy self.lamp.data.energy = np.random.uniform(self.rad * 0.5, self.rad * 1.5) / 30 Rotations, 
Translations, Scales = {}, {}, {} # Render object masks for i in range(len(self.models)): model = self.models[i]['object_name'] R_model = rand_rotation() T_model = T + np.array( [self.offset[i % 5], sin(radians(ele)) * self.offset[i // 5], -cos(radians(ele)) * self.offset[i // 5]] ) self.objs[model].matrix_world = convert_pose_array_to_matrix(R_model, T_model) add_color(self.objs[model], color=((i + 1) * 0.01, (i + 1) * 0.01, (i + 1) * 0.01), shadeless=True) scale = np.random.uniform(self.dim_min, self.dim_max) / max(self.objs[model].dimensions) self.objs[model].scale = scale, scale, scale Rotations[i], Translations[i], Scales[i] = R_model, T_model, scale add_color(self.table, color=(0, 0, 0), shadeless=True) self.scene.render.filepath = os.path.join(self.out_dir, '{:04d}_mask'.format(image_id)) self.depthFileOutput.file_slots[0].path = '{:04d}_depth_'.format(image_id) render_without_output(use_antialiasing=False) # Save mask as uint8 image mask = Image.open(os.path.join(self.out_dir, '{:04d}_mask.png'.format(image_id))).convert('L') mask.save(os.path.join(self.out_dir, '{:04d}_mask.png'.format(image_id))) # Render textured image and depth map for i in range(len(self.models)): model = self.models[i]['object_name'] add_texture_map(self.objs[model], os.path.join(self.texture_dir, random.choice(self.textures))) # Generate the sample annotation sample_frame = {} sample_frame["scene_id"] = scene_id sample_frame["image_id"] = image_id sample_frame["obj_id"] = i sample_frame["model_path"] = self.models[i]['model_name'] sample_frame["scale"] = Scales[i] sample_frame["cam_R_m2c"] = list(Rotations[i].reshape(-1)) sample_frame["cam_t_m2c"] = list(Translations[i]) cx, cy, outside = obtain_obj_center(Translations[i], self.fx, self.fy, self.cx, self.cy, self.height, self.width) sample_frame["obj_center"] = [cx, cy] sample_frame["obj_outside"] = outside bbox, px_visib, occupy_fract = obtain_obj_region(os.path.join(self.out_dir, '{:04d}_mask.png'.format(image_id)), i) sample_frame["bbox"] = bbox sample_frame["px_visib"] = px_visib sample_frame["occupy_fract"] = occupy_fract annot['{}'.format(start_idx + i)] = sample_frame add_texture_map(self.table, os.path.join(self.bg_dir, random.choice(self.bg_imgs))) self.scene.render.filepath = os.path.join(self.out_dir, '{:04d}_image'.format(image_id)) render_without_output(use_antialiasing=True) if __name__ == '__main__': # input and output directory model_dir = '/media/xiao/newhd/XiaoDatasets/ABC/abc_0000' out_dir = '/media/xiao/newhd/XiaoDatasets/ABC/synthetic_data_0000' scene_id = len(os.listdir(out_dir)) out_dir = os.path.join(out_dir, '{:06d}'.format(scene_id)) images_per_scene = 100 # textures and backgrounds directory texture_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'textures') bg_dir = '/media/xiao/newhd/XiaoDatasets/PascalVOC/VOC2012/JPEGImages' # TODO: consider mutilple instances of the same shape model_files = [name for name in os.listdir(model_dir) if os.path.getsize(os.path.join(model_dir, name)) / (2 ** 20) < 10] model_number = np.random.randint(5, 25) model_files = random.choices(model_files, k=model_number) model_files = [os.path.join(model_dir, name) for name in model_files] render_machine = RenderMachine(model_files, out_dir, texture_dir=texture_dir, bg_dir=bg_dir, rad=3000) # Load table poses from the LINEMOD-OCCLUSION dataset table_poses = np.load('table_poses.npz') R = table_poses['R'] T = table_poses['T'] Ele = table_poses['Ele'] idx = np.random.randint(0, R.shape[0], size=(images_per_scene,)) # Read in annotation json 
file annotation_file = '/media/xiao/newhd/XiaoDatasets/ABC/annotation_0000.json' if os.path.isfile(annotation_file): annot = json.load(open(annotation_file)) start_idx = len(annot) else: annot = {} start_idx = 0 for i in range(len(idx)): render_machine.render_random_pose( annot, start_idx + i * model_number, scene_id, i, R[idx[i], :], T[idx[i], :], Ele[idx[i]]) with open(annotation_file, 'w') as f: json.dump(annot, f, indent=4) os.system('rm blender_render.log')
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ServerUpdate(Model): """An update request for an Azure SQL Database server. Variables are only populated by the server, and will be ignored when sending a request. :param administrator_login: Administrator username for the server. Once created it cannot be changed. :type administrator_login: str :param administrator_login_password: The administrator login password (required for server creation). :type administrator_login_password: str :param version: The version of the server. :type version: str :ivar state: The state of the server. :vartype state: str :ivar fully_qualified_domain_name: The fully qualified domain name of the server. :vartype fully_qualified_domain_name: str :param tags: Resource tags. :type tags: dict[str, str] """ _validation = { 'state': {'readonly': True}, 'fully_qualified_domain_name': {'readonly': True}, } _attribute_map = { 'administrator_login': {'key': 'properties.administratorLogin', 'type': 'str'}, 'administrator_login_password': {'key': 'properties.administratorLoginPassword', 'type': 'str'}, 'version': {'key': 'properties.version', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'fully_qualified_domain_name': {'key': 'properties.fullyQualifiedDomainName', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__(self, *, administrator_login: str=None, administrator_login_password: str=None, version: str=None, tags=None, **kwargs) -> None: super(ServerUpdate, self).__init__(**kwargs) self.administrator_login = administrator_login self.administrator_login_password = administrator_login_password self.version = version self.state = None self.fully_qualified_domain_name = None self.tags = tags
# -*- coding: utf-8 -*-
import os

import pandas

__author__ = 'Petr Belohlavek <[email protected]>'


class PscKonvertor:
    """Converts postal codes (PSC) to their corresponding districts (okres)
    and regions (kraj). Lookups are indexed for maximum speed."""

    _MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
    _DATA_PATH = os.path.join(_MODULE_PATH, 'data')

    def __init__(self, psc2okres_f=os.path.join(_DATA_PATH, 'psc2okres.csv'),
                 okres2kraj_f=os.path.join(_DATA_PATH, 'okres2kraj.csv')):
        """CSV tables with the columns PSC,Okres and Okres,Kraj.
        They may optionally contain additional columns."""
        self.psc2okres_ = pandas.read_csv(psc2okres_f, header=0, encoding='utf-8')
        self.psc2okres_ = self.psc2okres_.set_index(['PSC'])

        self.okres2kraj_ = pandas.read_csv(okres2kraj_f, header=0, encoding='utf-8')
        self.okres2kraj_ = self.okres2kraj_.set_index(['Okres'])

    def psc2okres(self, psc):
        """Converts `psc` to the district (okres) in which the given municipality lies."""
        psc_zaznamy = self.psc2okres_.loc[psc]
        zaznam = {}
        if type(psc_zaznamy) == pandas.core.frame.DataFrame:
            zaznam = psc_zaznamy.iloc[0]
        elif type(psc_zaznamy) == pandas.core.series.Series:
            zaznam = psc_zaznamy
        else:
            raise KeyError('Unexpected type')

        return zaznam['Okres']

    def okres2kraj(self, okres):
        """Converts `okres` to the region (kraj) in which the given district lies."""
        if 'Praha' in okres:
            return 'Hlavní město Praha'
        else:
            kraj_zaznam = self.okres2kraj_.loc[okres]
            return kraj_zaznam['Kraj']

    def psc2kraj(self, psc):
        """Converts `psc` to the region (kraj) in which the given municipality lies."""
        okres = self.psc2okres(psc)
        return self.okres2kraj(okres)
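# Usage sketch (illustrative, not part of the original module): it assumes the
# PscKonvertor class above is importable and that the bundled data/psc2okres.csv
# and data/okres2kraj.csv tables are present; the postal code is a made-up value.
konvertor = PscKonvertor()
okres = konvertor.psc2okres(11000)   # district for the given postal code
kraj = konvertor.psc2kraj(11000)     # region for the same postal code
print(okres, kraj)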
import pytest from eth2.beacon.state_machines.forks.serenity import ( SerenityStateMachine, ) from eth2.beacon.state_machines.forks.xiao_long_bao import ( XiaoLongBaoStateMachine, ) @pytest.mark.parametrize( "sm_klass", ( SerenityStateMachine, XiaoLongBaoStateMachine, ) ) def test_sm_class_well_defined(sm_klass): state_machine = sm_klass(chaindb=None, slot=None) assert state_machine.get_block_class()
"""This program was created on 12/04/2015 It takes an user inputted string encrypts it with the Transposition Cipher and emails it to the users choice of person https://www.facebook.com/AiiYourBaseRBel0ngToUs """ # SECURITY NOTICE # THE EMAIL SENDS THE KEY NUMBER # GET RID OF "myKey" in msg under main() # to fix this import sys import smtplib import random import time import transpositionEncrypt def main(): message = str(input("enter message here ")) Email(message) def Email(msg, toaddrs): # gets email to send to # i've put it here so timer isn't disrupted # starts a timer startTime = time.time() # Encrypts message with random key # TODO use Affine cipher as more secure myKey = random.randint(1, 26) msg = transpositionEncrypt.encryptMessage(myKey, msg) # Email credentials & the message with KEY fromaddr = '' print("\nThis may take a few seconds") username = '' password = '' KeySTR = str(myKey) msg = ("The key is ") + KeySTR + ("\n\n") + msg # The actual mail send server = smtplib.SMTP('smtp.gmail.com:587') server.starttls() server.login(username, password) server.sendmail(fromaddr, toaddrs, msg) server.quit() # stops timer and prints time totalTime = round(time.time() - startTime, 2) print(totalTime) # closes program see close() close() def close(): print("\nProgram is now exiting\n") sys.exit() if __name__ == '__main__': main()
from .table import TableCreate, TableOut, TableUpdate, TableBase # noqa
test = { 'name': 'q3_1_4', 'points': 1, 'suites': [ { 'cases': [ {'code': '>>> type(relative_risk(NHS)) in set([float, np.float64])\nTrue', 'hidden': False, 'locked': False}, {'code': '>>> np.isclose(round(float(relative_risk(NHS)), 3) - 0.474, 0)\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
#merges observational notation from Gsheet link = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQ5JRPuanz8kRkVKU6BsZReBNENKglrLQDj1CTWnM1AqpxdWdWb3BEEzSeIcuPq9rSLNwzux_1l7mJb/pub?gid=1668794547&single=true&output=csv' import sys import numpy as np import pandas as pd import os.path from datetime import datetime import glob from scipy import stats from tqdm import tqdm if len(sys.argv) < 2: print("Usage: python 3_extradata.py [sample rate]") sys.exit(1) samprt = sys.argv[1] #samprt = ""'5T'"" link = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQ5JRPuanz8kRkVKU6BsZReBNENKglrLQDj1CTWnM1AqpxdWdWb3BEEzSeIcuPq9rSLNwzux_1l7mJb/pub?gid=1668794547&single=true&output=csv' #observation = pd.read_csv(link) observation = pd.read_csv(link, parse_dates=["Timestamp_Overrode"], index_col=["Timestamp_Overrode"]) observation.index = observation.index.tz_localize('America/New_York').tz_convert('UTC') notes= pd.DataFrame(observation[['note','sensor','Coord_X_m', 'Coord_Y_m', 'Coord_Z_m','Position_HumanReadable']]) notes.sort_index( inplace=True ) notes = notes["2019-09-05 ":"2019-12-10 "] #set paths base_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) print("base: " +base_path) start_path = os.path.join(base_path, "CSV", "2Interim", "2_cbas_post_SD_resample","resampled(" + samprt + ")") print("start: " + start_path) final_path_out = os.path.join(base_path, "CSV", "2Interim", "3_extradata") print("final: " + final_path_out) fn = 'protoCBAS-*' path = sorted(glob.glob(os.path.join(start_path, fn))) #print("path: "+ str(path)) dfs = [pd.read_csv(f, parse_dates=["timestamp"], index_col=["timestamp"]).assign(sensor=f) for f in tqdm(path,desc="Reading CSVs...")] availablecolumns = pd.Series(dfs[0].columns) print("Loaded sensor data for " + str(len(dfs)) + " sensors") ## filtering directory/file extensions stripboard = ((len(start_path)+1)) # getting the length of the path up to where glob fills in filenames for d in dfs: d.sensor = d.sensor.str.slice(start=stripboard).str.replace(".csv", "") sensors = [d["sensor"][0] for d in dfs] shape = [d.shape for d in dfs] print("From file ---"+str(sensors)) print(" df shape ---"+str(shape)) dfsNoted = [pd.merge(d,notes.where(notes.sensor.str.contains(d["sensor"][1])) ,left_index=True, right_index=True, how = 'outer',suffixes=('_SD', '_note')) for d in tqdm(dfs,desc="Merging notes...")] # limit for forward fill so big gaps dont have unessesary metadata # may need to adjust with different ranges/samplerates/gaps #1440 m in 24 hrs #720 in 12hrs #144 5Ts in 720 #24 30Ts in 720 #144 5Ts in 1440 #24 30Ts in 1440 ffilllimit = None # maximum number of consecutive NaN values to forward/backward fill. # In other words, if there is a gap with more than this number of consecutive NaNs, # it will only be partially filled. 
dfsNoted_ffill = [d.fillna(method='ffill',limit=ffilllimit) for d in tqdm(dfsNoted,desc="ffill...")] for i in range(len(dfsNoted)): dfsNoted[i][['sensor_SD', 'note', 'sensor_note', 'Coord_X_m', 'Coord_Y_m', 'Coord_Z_m', 'Position_HumanReadable']] = dfsNoted_ffill[i][['sensor_SD', 'note', 'sensor_note', 'Coord_X_m', 'Coord_Y_m', 'Coord_Z_m', 'Position_HumanReadable']] #idea is to ff/bf only these columns and not sensor data dfsNoted_sensorbfill = [d.fillna(method='bfill') for d in tqdm(dfsNoted,desc="bfill...")] # fill sensor colums up to 0 for i in range(len(dfsNoted)): dfsNoted[i][['sensor_SD','sensor_note']] = dfsNoted_sensorbfill[i][['sensor_SD','sensor_note']] def numerictimes(d): d['Wkdy(EST)'] = pd.to_numeric(d.index.tz_convert('America/New_York').strftime('%w')) d['Hour(EST)'] = pd.to_numeric(d.index.tz_convert('America/New_York').strftime('%H')) d['Month(EST)'] = pd.to_numeric(d.index.tz_convert('America/New_York').strftime('%m')) d['TOD(EST)'] = pd.to_numeric(d.index.tz_convert('America/New_York').strftime('%H''%M')) d['DOY(EST)'] = pd.to_numeric(d.index.tz_convert('America/New_York').strftime('%j')) #d['DWkdy(EST)'] = pd.to_numeric(d.index.tz_convert('America/New_York').strftime('%w')) return d print("Numeric time colums....") #dfsNoted = list(map(numerictimes, dfsNoted)) dfsNoted = [numerictimes(d) for d in tqdm(dfsNoted,desc="Numeric time...")] for i in tqdm(range(len(dfsNoted)),desc="Writing CSVs..."): dfsNoted[i].to_csv(os.path.join(final_path_out,sensors[i]+".csv"),index_label="timestamp") dfsensors = [d["sensor_SD"][0] for d in dfsNoted] notessensors = [d["sensor_note"][0] for d in dfsNoted] print("From DF ---"+str(dfsensors)) print("From Note ---"+str(notessensors)) print("done! " + "sample rate-"+samprt)
from rlcard.utils.utils import print_card from rlcard.games.base import Card class HumanAgent(object): ''' A human agent for Hearts. ''' def __init__(self, num_actions): ''' Initilize the human agent Args: num_actions (int): the size of the output action space ''' self.use_raw = True @staticmethod def step(state): ''' Human agent will display the state and make decisions through interfaces It is always assumed that the human player is player 0 Args: state (dict): A dictionary that represents the current state Returns: action (int): The action decided by human ''' raw_state = state['raw_obs'] passing_cards = raw_state['passing_cards'] pass_player_id = raw_state['passing_cards_players_to_left'] trick = raw_state['trick'] print('\n=============== State ===============') print('Hearts broken: {}'.format(raw_state['hearts_are_broken'])) if raw_state['can_sluff']: print('You get to sluff this trick') if not passing_cards: print('\n=============== Trick ===============') print_card(trick) print('\n=============== Your Hand ===============') hand_list = sorted(list(raw_state['player_hand']), key=lambda card: '{}{:02d}'.format(card.suit, Card.valid_rank.index(card.rank))) print_card(hand_list) print('\n=========== Actions You Can Choose ===========') actions_by_number = {} current_action_number = 0 for suit in Card.valid_suit: for rank in Card.valid_rank: action = '{}{}'.format(rank, suit) if action in state['raw_legal_actions']: if passing_cards: print('{}: Pass {} to Player {}'.format(current_action_number, action, pass_player_id)) else: print('{}: Play {}'.format(current_action_number, action)) actions_by_number[current_action_number] = action current_action_number = current_action_number + 1 print('') input_action_string = input('>> You choose action (integer): ') action_number = int(input_action_string) if input_action_string.isnumeric() else -1 while action_number < 0 or action_number >= len(state['legal_actions']): print('Action illegal...') input_action_string = input('>> Re-choose action (integer): ') action_number = int(input_action_string) if input_action_string.isnumeric() else -1 return actions_by_number[action_number] def eval_step(self, state): ''' Predict the action given the current state for evaluation. The same to step here. Args: state (numpy.array): an numpy array that represents the current state Returns: action (int): the action predicted (randomly chosen) by the random agent ''' return self.step(state), {}
import numpy as np from numba import jit from multiprocessing import Pool from functools import partial @jit(forceobj=True) def construct_bin_column(x: np.array, max_bins: int) -> np.array: x, cnt = np.unique(x, return_counts=True) sum_cnt = np.sum(cnt) if len(x) == 1: return np.array([], 'float64') elif len(x) == 2: bins = (x[0]*cnt[0] + x[1]*cnt[1]) / sum_cnt return np.array([bins], 'float64') elif len(x) <= max_bins: bins = np.zeros(len(x)-1, 'float64') for i in range(len(x)-1): bins[i] = (x[i] + x[i+1]) / 2.0 return bins elif len(x) > max_bins: cnt = np.cumsum(cnt) t, p = 0, len(x) / float(max_bins) bins = np.zeros(max_bins-1, 'float64') for i in range(len(x)): if cnt[i] >= p: bins[t] = x[i] t += 1 p = cnt[i] + (sum_cnt - cnt[i]) / float(max_bins-t) if t == max_bins-1: break return bins def map_bin_column(x, bins): bins = np.insert(bins, 0, -np.inf) bins = np.insert(bins, len(bins), np.inf) return np.searchsorted(bins, x, side='left').astype('uint16') - 1 def _get_bins_maps(x_column: np.array, max_bins: int) -> tuple: bins = construct_bin_column(x_column, max_bins) maps = map_bin_column(x_column, bins) return (bins, maps) def get_bins_maps(x: np.array, max_bins: int, threads: int =1) -> (list, np.array): out = [] if threads==1: for i in range(x.shape[-1]): out.append(_get_bins_maps(x[:, i], max_bins)) else: x = list(np.transpose(x)) pool = Pool(threads) f = partial(_get_bins_maps, max_bins=max_bins) out = pool.map(f, x) pool.close() bins, maps = [], [] while out: _bin, _map = out.pop(0) bins.append(_bin) maps.append(_map) return bins, np.stack(maps, axis=1) if __name__ == '__main__': x = np.random.rand(10000, 10) bins, maps = get_bins_maps(x, 8, 2) bin = bins[0] print(bin)
from input_device import InputDevice
import core.app.app_server as app_server


class AppDevice(InputDevice):
    """ Simple wrapper around the app server """

    def __init__(self, host, port):
        super(AppDevice, self).__init__()
        self.host = host
        self.port = port

    def main(self, output_devices, fft_in_queue):
        app_server.run(self.host, self.port, output_devices, fft_in_queue)
# -*- coding:utf-8 -*- """ 在插件热插拔的基础上实现中间件 PS: 需要注意的是,插件目录如果有同名文件,则只会导入第一个目录中找到的文件 """ import copy import logging import loader from inspect import isfunction, getargspec # 中间件 class Middleware(object): def __init__(self, logger=logging): self.logger = logger self.funcList = [] # 要执行的方法列表。示例:[funcA, funcB] self.funcParam = {} # 存放主函数执行过程所需参数。示例:{funcA:{"task":1, "taskPolicy":2}} self.plugin = {} # 方法要执行的插件列表。示例:{funcA:{"before":[pluginA, pluginB], "after":[pluginC]} self.pluginParam = {} # 各插件相关信息。示例: {funcA:{"before":[{"pluginDir":"dirA", "loop":False}, {"pluginDir":"dirB","loop":True}]}} self.pluginExecParam = {} # 插件执行过程所需参数集。示例:{"task":1, "taskPolicy":2} # 在方法列表末尾添加新的对象 def funcAppend(self, func): if not isfunction(func): raise TypeError("func must be callable") if func not in set(self.funcList): self.funcList.append(func) # 从方法列表中找出某个值第一个匹配项的索引位置 def funcIndex(self, func): if not isfunction(func): raise TypeError("func must be callable") self.funcList.index(func) # 将对象插入列表 def funcInsert(self, index, func): if not isfunction(func): raise TypeError("func must be callable") if func not in set(self.funcList): self.funcList.insert(index, func) # 移除方法列表中的一个元素(默认最后一个元素),并且返回该元素的值 def funcPop(self, index=-1): return self.funcList.pop(index) # 移除列表中某个值的第一个匹配项 def funcRemove(self, func): if not isfunction(func): raise TypeError("func must be callable") self.funcList.remove(func) # 返回方法名称列表 @property def funcNameList(self): funcName = [i.__name__ for i in self.funcList] return funcName # 给特定函数添加参数 def addParam2Func(self, func, **kwargs): # 获取函数名 funcName = func if isfunction(func): funcName = func.__name__ if funcName not in self.funcNameList: raise ValueError("%s not in funcList, please add func to middleware first" % funcName) param = self.funcParam.get(funcName, {}) for k, v in kwargs.items(): # 存在则更新,不存在则新增 param[k] = v self.funcParam[funcName] = param # 判断插件目录是否已存在 def dirExist(self, funcName=None, pluginDir=None, position=None): # 插件目录是否存在,以及插件下标 index = 0 flag = False # 判断插件参数是否已存在 pluginParam = self.pluginParam.get(funcName, {}) dirName = [i["pluginDir"] for i in pluginParam.get(position, [])] if pluginDir in dirName: flag = True index = dirName.index(pluginDir) return flag, index # 向方法添加插件 def addPlugin2Func(self, func=None, pluginDir=None, loop=None, position=None): # 参数校验 if not (position and not (position not in ("before", "after"))): raise ValueError("position must be none or in ['before', 'after']") elif not position: position = "before" # 获取函数名 funcName = func if isfunction(func): funcName = func.__name__ if funcName not in self.funcNameList: raise ValueError("%s not in funcList, please add func to middleware first" % funcName) # 存储参数 pluginParam = self.pluginParam.get(funcName, {}) if not pluginParam: pluginParam[position] = [{"pluginDir": pluginDir, "loop": loop}] else: exsit, index = self.dirExist(funcName=funcName, pluginDir=pluginDir, position=position) if exsit: pluginParam[position][index] = {"pluginDir": pluginDir, "loop": loop} else: if pluginParam.get(position): pluginParam[position].append({"pluginDir": pluginDir, "loop": loop}) else: pluginParam[position] = [{"pluginDir": pluginDir, "loop": loop}] self.pluginParam[funcName] = pluginParam # 从方法删除插件 def deletePlugin2Func(self, func=None, pluginDir=None, position=None): # 参数校验 if not (position and not (position not in ("before", "after"))): raise ValueError("position must be none or in ['before', 'after']") elif not position: position = "before" # 获取函数名 funcName = func if isfunction(func): funcName = func.__name__ if 
funcName not in self.funcNameList: return # 删除插件 pluginParam = self.pluginParam.get(funcName, {}) if not pluginParam: return # 如果没有传具体插件目录则删除整个插件位置 if not pluginDir: pluginParam.pop(position) else: exsit, index = self.dirExist(funcName=funcName, pluginDir=pluginDir, position=position) if exsit: pluginParam[position].pop(index) # 向插件添加参数集 def addParam2Plugin(self, **kwargs): for k, v in kwargs.items(): # 存在则更新,不存在则新增 self.pluginExecParam[k] = v # 向方法添加插件 def updatePlugin(self): """pluginParam参数示例: { "funcA": { "before": [ { "pluginDir": "dirA", "loop": false }, { "pluginDir": "dirB", "loop": true } ], "after": [ { "pluginDir": "dirC", "loop": false } ] } } """ for funcName, param in self.pluginParam.items(): pluginsInfo = {} self.plugin[funcName] = pluginsInfo for position, dirInfo in param.items(): plugins = [] pluginsInfo[position] = plugins for d in dirInfo: loaderObj = loader.PluginLoader() loaderObj.loadPlugins(pluginDir=d["pluginDir"], loop=d["loop"]) plugins.extend(loaderObj.plugins) # 返回函数调用链 def funcCallChain(self, func=None): self.updatePlugin() # 更新插件 callChain = [] # 获取函数名 if not func: raise ValueError("func can not be none") funcName = func if isfunction(func): funcName = func.__name__ # 查询调用链 if funcName not in self.funcNameList: return callChain callChain.extend([i.keys()[0] for i in self.plugin[funcName].get("before", []) if i]) callChain.append(funcName) callChain.extend([i.keys()[0] for i in self.plugin[funcName].get("after", []) if i]) return callChain # 函数执行。跟参数进行绑定,然后执行 def callFunc(self, func, plugin=False): # 函数签名 signature = getargspec(func) lp, dp = copy.deepcopy(signature[0]), copy.deepcopy(signature[3]) # 函数没有参数.则直接执行 if len(signature[0]) == 0: return func() # 获取对应参数 funcName = func.__name__ if not plugin: param = self.funcParam.get(funcName, {}) else: param = self.pluginExecParam # 组织相应参数 dpLen = len(dp) if dp else 0 listParamName = lp[:len(lp) - dpLen] if dp: dictParam = dict(zip(lp[-dpLen:], dp)) else: dictParam = {} # 获取*args参数 listParam = [param.get(i, "tvmFlag") for i in listParamName] # 更新**kwargs参数 tDParam = copy.deepcopy(dictParam) for i in tDParam: if param.get(i): dictParam[i] = param[i] # 对参数进行判断 if len(listParam) == 0 and len(dictParam) == 0: raise ValueError("func %s%s need %s param, 0 provide" % (funcName, tuple(lp), len(lp))) elif "tvmFlag" in listParam: raise ValueError("func %s%s param %s not provide" % ( funcName, tuple(lp), lp[listParam.index("tvmFlag")])) elif "tvmFlag" in dictParam.values(): tParam = None for k, v in dictParam.items(): if "tvmFlag" == v: tParam = k break raise ValueError("func %s%s's %s param not provide" % (funcName, tuple(signature[0]), tParam)) return func(*listParam, **dictParam) # 执行函数 def process(self): self.updatePlugin() # 更新插件 for func in self.funcList: funcName = func.__name__ plugins = self.plugin.get(funcName) if not plugins: continue # 提权函数执行前后插件 beforePlugins = plugins.get("before", []) afterPlugins = plugins.get("after", []) # 函数前插件 flag = True # 标识插件是否继续执行 for p in beforePlugins: n, m = p.items()[0] method = getattr(m["module"], "run", None) if isfunction(method): flag = self.callFunc(method, plugin=True) if not flag: break # 执行主函数 if not flag: continue # 执行下一个主函数 rst = self.callFunc(func) if rst["errCode"] != 0: return # 函数后插件 for p in afterPlugins: n, m = p.items()[0] method = getattr(m["module"], "run", None) if isfunction(method): flag = self.callFunc(method, plugin=True) if not flag: break
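# Usage sketch (illustrative, not part of the original module).  It wires one
# main function into the Middleware class above; "plugins/check" is a
# placeholder directory that loader.PluginLoader would have to be able to load.
def deploy(task, taskPolicy):
    print("deploying task %s with policy %s" % (task, taskPolicy))
    return {"errCode": 0}                            # process() expects an errCode

mw = Middleware()
mw.funcAppend(deploy)                                # register the main function
mw.addParam2Func(deploy, task=1, taskPolicy=2)       # bind its parameters
mw.addPlugin2Func(func=deploy, pluginDir="plugins/check",
                  loop=False, position="before")     # plugins run before deploy
mw.addParam2Plugin(task=1, taskPolicy=2)             # parameters shared by plugins
mw.process()                                         # before-plugins, deploy, after-plugins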
from django.apps import AppConfig class HeadsupappConfig(AppConfig): name = 'HeadsUpApp'
#!/usr/bin/python # -*- coding: utf-8 -*- """ tintri_flr.py ~~~~~~~~~~~~~~~~~~~~~~~~ CLI programm to Map and Unmap a Tintri Snapshot to a virtual LINUX machine Requirements: The programm needs "root" rights or "sudo" to run. This is needed because it uses the commands "mount" and "unmount". The Tintri http_api is also needed! Functionality: First of all the programm let you choose the vmware virtual machine name of your LINUX machine. After that you will get a list of all usable Snapshots for this machine. The Snapshot-disks will be mapped to the running system. After the "mapping" you can step into the Snapshot partitions and copy out the "old" data. The programm generates a MountPoint for each partition in the "TINTRI_RECOVER_DIR" (You can define this directory further down. The needed status information for a successfull unmapping is stored in the directory which is defined in the variable "dbg_path". Syntax: A complete Syntax information and all options are listet if you call the the "usage" of the command itself use: tintri_flr.py -? :copyright: 2016 Schaefer & Tobies SuC GmbH. :author: Uwe W. Schaefer <[email protected]> :license: LGPL, see LICENSE for details. """ #=============================================================================== # Changeable Part #=============================================================================== TINTRI_RECOVER_DIR = "/tintri_recover" dbg_path = "/root/.tintri" #=============================================================================== # END OF user changeable Part #=============================================================================== # # uws: 01.12.2015 # File Level Recovery Support for Tintri FLR Snapshot Feature # # uws: 07.03.2016 # added Tintri-HTTP-API calls with the schtob/tintri/http_api library # # New Functionality: # # now the script asks for the virtual machine name, if the name is not found # in the virtual machine name list. # # the scripts lists the tintri snapshots and askes which one should be # restored for FLR. 
# # the reset Option removes the mounts and after that it removes the # synced snapshot-disks from the VM # import sys, os try: # check if the schtob/tintri api lib is installed from schtob.tintri_http_api import TFilers except: # check if this is a copy from GitHub without installation if os.path.isdir('../src/schtob'): sys.path.append('../src') from schtob.tintri_http_api import TFilers else: print "Installation Error: Couln't find the schtob Tintri API Lib" sys.exit(1) import getopt, pickle from os.path import join, basename, islink from subprocess import Popen, PIPE from time import strftime, localtime from schtob.tintri_http_api import constants from schtob.tintri_http_api import errors Version = "1.1.0" DISK_BY_PATH = "/dev/disk/by-path" SCSI_SEARCH_PATH = "/sys/class/scsi_host" MOUNT = "/bin/mount" UMOUNT = "/bin/umount" FDISK = "/sbin/fdisk -l" PING = '/bin/ping' first_fdisk_file = join(dbg_path, 'first_fdisk_info.pickle') verbose = 1 dbg_fd = None def get_disk_mnt_info(): """ Get partition infos for the actual visible disks """ part_info = {} # # get the mount information for the disk partitions # proc = Popen(MOUNT, stdin = PIPE, stdout = PIPE, stderr = PIPE, shell = True) for l in proc.stdout: if verbose > 2: print "l: %s" % l.strip() parms = l.split() if parms[0].find('/dev/') == 0: part_info[basename(parms[0])] = {'mnt_path' : parms[2]} if not os.path.exists(DISK_BY_PATH): print "ERROR: couldn't find directory \'%s\'" % DISK_BY_PATH sys.exit(5) for part in os.listdir(DISK_BY_PATH): fname = join(DISK_BY_PATH, part) if islink(fname): dev = basename(os.readlink(fname)) if dev in part_info.keys(): part_info[dev]['disk_path'] = part else: print "ERROR: disk_path isn't a symbolic link: \'%s\'" % fname sys.exit(6) if verbose > 1: print part_info return part_info def get_fdisk_info(dbg_fd, old_info=None): """ Get FDISK info for machine """ fdisk_info = {} proc = Popen(FDISK, stdin = PIPE, stdout = PIPE, stderr = PIPE, shell = True) for l in proc.stdout: if verbose > 1: print "fdisk: %s" % l.strip() dbg_fd.write(l) if l.find('Disk') == 0 and '/dev' in l: # new Disk Information parms = l.split() disk = basename(parms[1].strip(':')) if old_info and disk in old_info: old_disk = True # Disk Info exists and should not be added # into the "new" dict continue else: old_disk = False fdisk_info[disk] = { 'size' : "%s %s" % ( parms[2], parms[3].strip(',')), 'partitions' : [], } elif l.find('/dev/') == 0: if old_disk: continue boot = False parms = l.split() if parms[1] == '*': # boot flag second_param = 2 boot = True else: second_param = 1 part_info = { 'name' : basename(parms[0]), 'boot' : boot, 'start': parms[second_param], 'end' : parms[second_param+1], } fdisk_info[disk]['partitions'].append(part_info) else: continue if verbose > 1: print """fdisk_info: %s """ % fdisk_info return fdisk_info def search_for_new_disks(): """ send scsi search command to find new disks """ for d in os.listdir(SCSI_SEARCH_PATH): scan_file = join(SCSI_SEARCH_PATH, d, "scan") cmd = "echo \"- - -\" >%s" % scan_file if verbose > 1: print "search_for_new_disks: cmd : %s" % cmd os.system(cmd) def init_env(quiet=False): """ initialize dbg area in "root" dir """ global dbg_fd first_fdisk = None mnt_info = None filername = None vm_name = os.uname()[1] if not 'linux' in sys.platform.lower(): print "ERROR: this script is only usable on a LINUX system" sys.exit(99) if not os.path.isdir(dbg_path): os.makedirs(dbg_path) dbg_file = join(dbg_path, 'flr.dbg') dbg_fd = open(dbg_file, 'w') dbg_fd.write(""" 
================================================================================ Start of script: %s Version. %s """ % (strftime("%x %X "), Version)) cache_info = {} if os.path.exists(first_fdisk_file): msg = """ INFO: orig_fdisk info allready exists! I will use this \'old\' info """ dbg_fd.write("%s\n" % msg) if verbose and not quiet: print msg fd = open(first_fdisk_file, 'r') pic = pickle.Unpickler(fd) cache_info['first_fdisk'] = pic.load() cache_info['mnt_info'] = pic.load() cache_info['filername'] = pic.load() cache_info['vm_name'] = pic.load() cache_info['active'] = pic.load() fd.close() dbg_fd.write("""OLD fdisk info: %s \n""" % first_fdisk) return cache_info def write_first_fdisk(first_fdisk, mnt_info, filername=None, vm_name=None, active=False): """ Write the First fdisk information into a python pickle file """ fd = open(first_fdisk_file, 'w') pic = pickle.Pickler(fd) pic.dump(first_fdisk) pic.dump(mnt_info) pic.dump(filername) pic.dump(vm_name) pic.dump(active) fd.close() def mount_snap_disks(first_disk, new_disk, mnt_info): """ Mount the new found partitions on "recover" paths """ global TINTRI_RECOVER_DIR # Plausi if not len(first_disk.keys()) == len(new_disk.keys()): print """ERROR: Number of disks before snapshot Recover is not equal the number of the newly found disks: Nr of primary disks : %s Nr of new Disks : %s """ % (len(first_disk.keys()), len(new_disk.keys())) sys.exit(8) for disk in first_disk: for part in first_disk[disk]['partitions']: for ndisk in new_disk: if first_disk[disk]['size'] == new_disk[ndisk]['size'] and\ len(first_disk[disk]['partitions']) == \ len(new_disk[ndisk]['partitions']): # OK Size and nr. of partitions is equal search the # correct partition and mount it for npart in new_disk[ndisk]['partitions']: if part['boot'] == npart['boot'] and \ part['start'] == npart['start'] and \ part['end'] == npart['end']: if part['name'] not in mnt_info: # swap or other not mounted partition continue mnt_name = mnt_info[part['name']]['mnt_path'] if mnt_name == '/': mnt_name = 'SLASH' mnt_dir = join(TINTRI_RECOVER_DIR, basename(mnt_name)) cmd = "%s /dev/%s %s" % (MOUNT, npart['name'], mnt_dir) if verbose: print cmd if not os.path.isdir(mnt_dir): os.makedirs(mnt_dir) os.system(cmd) def reset(reset_all=False, log_lvl=None): """ unmount the FLR Partitions reset_all : the pickle info will be removed too """ if os.path.isdir(TINTRI_RECOVER_DIR): for fname in os.listdir(TINTRI_RECOVER_DIR): mnt_dir = join(TINTRI_RECOVER_DIR, fname) if os.path.ismount(mnt_dir): cmd = "%s -l %s" % (UMOUNT, mnt_dir) if verbose: print "... " + cmd os.system(cmd) if reset_all: os.rmdir(mnt_dir) # # remove the mapped disks from virtual machine # try: cache_info = init_env(quiet=True) except: cache_info = {} if 'filername' in cache_info: if verbose > 0: print "... 
Remove mapped snapshot disks from virtual machine" try: filer = init_connection(cache_info['filername'], log_lvl) filer.del_flr_disks(cache_info['vm_name']) except: pass if reset_all: if os.path.exists(first_fdisk_file): os.unlink(first_fdisk_file) if os.path.exists(TINTRI_RECOVER_DIR): os.rmdir(TINTRI_RECOVER_DIR) else: write_first_fdisk(cache_info['first_fdisk'], cache_info['mnt_info'], cache_info['filername'], cache_info['vm_name'], active=False) sys.exit(0) def ping(address): import subprocess args = [PING, '-c', '1', address] cmd = "%s -c 1 %s" % (PING, address) popen = subprocess.Popen(args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE) try: stdoutdata, stderrdata = popen.communicate() except: import traceback print cmd traceback.print_exc() return False if popen.returncode == 0: return True elif popen.returncode == 2: print("no response from %s" % address) return False else: print("ping to %s failed" % address) return False def check_tfiler(filer): """ Get the name of the Tintri Storage-System (Filername) """ filername = None while not filername: if not filer: filer = raw_input("""Please enter the Tintri Storage-System-Name or address: """) if not filer or len(filer) == 0: print("No name given; Giving Up") sys.exit(255) if ping(filer): filername = filer else: print("""Could not reach Storage-System : \'%s\' """ % filer) filer = None return filername def get_vm_name_from_user(filername, vm_name): """ the hostname is not equivalent to the virtual machine name ask user for the virtual machine name """ # get virtual machine names from filer vm_names = TFilers(filername).get_VMnames() if vm_name in vm_names: # given vm_name exists in the virtual machine names return vm_name while True: print """ Please select the virtual machine for the local host """ i = 1 for vm in vm_names: print "%3d : %s" % (i, vm) i += 1 answ = raw_input(""" Please type the Nr. 
of the virtual machine name for the local host [%s-%s] : """ % (1, i-1) ) if not answ.isdigit(): print "Input ERROR: Please type a number between 1 and %s" % i-1 else: return vm_names[int(answ)-1] def get_vm_snapshot(filername, vm_name): """ get the snapshts for the virtual machine from the filer print the list of Snapshots and ask the user which snapshot should be used for the FLR """ snapshots = TFilers(filername).get_VMsnapshots(vmName = vm_name) if not snapshots: print "No Snapshots found for VM" sys.exit(0) # sort the snapshots snapshots.sort(cmp=lambda x,y: cmp(x['createTime'], y['createTime'])) attrs=constants.snap_default_attrs header = ['Nr.'] for a in attrs: # pimp the header entrys if a == 'sizeChangedPhysicalMB': header.append('Changed MB') else: header.append(a) vm_length = len('vmName') rows = [] nr = 1 for snap in snapshots: row = [nr] for a in attrs: if a == 'createTime': # generate a date format row.append(strftime("%x %X", localtime(snap[a]/1000))) elif a == 'type': # the values are USER_GENERATED_SNAPSHOT, SCHEDULED_SNAPSHOT if snap[a] == 'USER_GENERATED_SNAPSHOT': row.append('manual') elif snap[a] == 'SCHEDULED_SNAPSHOT': row.append('scheduled') else: row.append(snap[a]) elif a == 'vmName': if vm_length < len(snap[a]): vm_length = len(snap[a]) row.append(snap[a]) else: row.append(snap[a]) nr += 1 rows.append(row) header.insert(1, vm_length) print("%3s %*s %17s %11s %22s %10s %7s" % (tuple(header))) for row in rows: row.insert(1, vm_length) print("%3d %*s %17s %11s %22s %10s %7s" % ((tuple(row)))) snap_nr = raw_input(""" Please enter the number of the Snapshot you want to recover from : """) if not snap_nr or len(snap_nr.strip()) == 0: print("No number given; Giving Up") sys.exit(255) return snapshots[int(snap_nr)-1] def map_snapshot(filername, vm_name, snap): """ map the disks of the given snapshot to the VM """ vm_uuid = TFilers(filername).get_VMuuid(vmName=vm_name) snap_uuid = snap['uuid']['uuid'] if verbose > 2: print "vm_uuid: %s \nsnap_uuid: %s" % (vm_uuid, snap_uuid) if verbose: print "... snapshot disks will be mapped to VM; Please be patient" TFilers(filername).flr_recover(vm_uuid, snap_uuid) def init_connection(filername, log_lvl): """ initialize the connection with the Filer """ import logging # Set LOGGING if log_lvl == 'DEBUG': LOG_LEVEL = logging.DEBUG elif log_lvl == 'INFO': LOG_LEVEL = logging.INFO elif log_lvl == 'WARNING': LOG_LEVEL = logging.WARNING elif log_lvl == 'ERROR': LOG_LEVEL = logging.ERROR elif log_lvl == 'CRITICAL': LOG_LEVEL = logging.CRITICAL else: LOG_LEVEL = logging.NOTSET try: tfiler = TFilers(filername, log_lvl=LOG_LEVEL) except errors.HTTP_Failure, err: print("ERROR: %s" % err.get_error()) sys.exit(1) except errors.API_Failure, err: print("ERROR: %s" % err.get_error()) sys.exit(1) return tfiler def usage(): print """ USAGE: %(prog)s [-v]* [-q] [(-f | --filer) <file-name>] [--vm <vm-name>] %(prog)s [-v]* [-q] [--reset | --reset_all] %(prog)s --version the meaning of the options: -v add verbosity -q | --quiet be quiet (only Error output) -f | --filer Tintri-Filername --vm | --vmname hypervisor virtual machine name --reset Unmount the FLR Partitions --reset_all Unmount the FLR Partitions and remove all cached informations about the Linux-partitions --version print programm version and exit --logging <lvl> Activate Logging of the API commands to the terminal. possible Levels are: DEBUG, INFO, WARNING, ERROR, CRITICAL the programm and the http_api are using the python "logging" module. 
The "reset" functionality doesn't need "filer" and "vm" parameters because these informations are stored in a temporary file in the "dbg_path". """ % { 'prog' : basename(sys.argv[0])} sys.exit(1) if __name__ == '__main__': # # Start of the Main-Prog # try: opts, args = getopt.getopt(sys.argv[1:], 'qvf:?', ['help', 'quiet', 'filer=', 'vmname=', 'reset', 'reset_all', 'version', 'logging=', ]) except: usage() filer = None vm_name = None log_lvl = None mRESET = False mRESET_ALL = False for o, a in opts: if o == "-v": verbose += 1 elif o == '--quiet' or o == '-q': verbose = 0 elif o == '--filer' or o == '-f': filer = a elif o == '--vmname': vm_name = a elif o == '--reset': mRESET = True elif o == '--reset_all': mRESET_ALL = True elif o == '--logging': log_lvl = a elif o == '--version': print "%s : %s" % (basename(sys.argv[0]), Version) sys.exit(0) else: usage() if mRESET: reset(log_lvl) elif mRESET_ALL: reset(True, log_lvl) # check if we have cached information about the Storage cache_info = init_env() if 'active' in cache_info and cache_info['active']: # it looks like an other snapshot is still mapped print """ ERROR: it looks like an other snapshot is still mapped Please call the command with the '--reset' option and restart again. """ sys.exit(3) if not 'first_fdisk' in cache_info or not cache_info['first_fdisk']: mnt_info = get_disk_mnt_info() first_fdisk = get_fdisk_info(dbg_fd) else: mnt_info = cache_info['mnt_info'] first_fdisk = cache_info['first_fdisk'] if not filer: filer = cache_info['filername'] if not vm_name: vm_name = cache_info['vm_name'] filername = check_tfiler(filer) init_connection(filername, log_lvl) # get virtual machine name; Could be different from hostname vm_name = get_vm_name_from_user(filername, vm_name) # write the found information to cache; Usefull for next call needed # for RESET write_first_fdisk(first_fdisk, mnt_info, filername, vm_name) # get Snapshots from Filer and ask user which one # should be mountet snap = get_vm_snapshot(filername, vm_name) map_snapshot(filername, vm_name, snap) search_for_new_disks() new_fdisk = get_fdisk_info(dbg_fd, first_fdisk) if not new_fdisk: print "ERROR: no new Disks found: :-(" sys.exit(0) mount_snap_disks(first_fdisk, new_fdisk, mnt_info) write_first_fdisk(first_fdisk, mnt_info, filername, vm_name, active=True)
#!/usr/bin/env python

from setuptools import setup, find_packages

setup(name="stitchclient",
      version="0.9.3.post1",
      description="A Stitch API client for Python",
      author="Stitch",
      author_email="[email protected]",
      url="https://github.com/stitchdata/python-stitch-client",
      classifiers=['Programming Language :: Python :: 3 :: Only'],
      packages=find_packages() + find_packages(where="./transit"),
      install_requires=[
          "python-dateutil==2.8.1",
          "msgpack-python",
          "requests==2.24.0",
      ]
)
from __future__ import annotations from typing import Callable, List, Optional, Sequence, Tuple import numpy as np import thinc from cytoolz import itertoolz from thinc.api import Model, chain, concatenate def get_model_preds(model: Model, texts: List[str], classes: np.ndarray) -> List[str]: """ Get model predictions for multiple texts as class labels rather than as a 2dim matrix of prediction probabilities. """ # predict in batches, otherwise memory blows UP results = ( result for texts_pt in itertoolz.partition_all(1000, texts) for result in get_topn_preds_and_probs(model.predict(texts_pt), 1, classes) ) return [lang for result in results for lang, _ in result] def get_topn_preds_and_probs( preds: np.ndarray, topn: int, classes: np.ndarray, ) -> List[List[Tuple[str, float]]]: # TODO # if only need 1 (max) value, use faster numpy ops? # if topn == 1: # idxs = np.argmax(preds, axis=1) # pred_probs = np.max(preds, axis=1) # pred_langs = self.classes[idxs] # return list(zip(pred_langs, pred_probs)) # otherwise, do the full array sorts to get topn max # else: idxs = np.argsort(preds, axis=1)[:, ::-1][:, :topn] pred_probs = np.sort(preds, axis=1)[:, ::-1][:, :topn] pred_langs = classes[idxs] return [ list(zip(pred_langs[i], pred_probs[i])) for i in range(pred_probs.shape[0]) ] def LangIdentifierModelV2( ns: Sequence[int] = (1, 2, 3), embed_dim: int = 100, hidden_width: int = 512, dropout: Optional[float] = 0.1, ) -> Model[List[str], thinc.types.Floats2d]: """ Build a language identification model inspired by Google's CLD3. Args: ns: Set of "n" for which character "n"-grams are extracted from input texts. If 1, only unigrams (single characters) are used; if [1, 2], then both unigrams and bigrams are used; and so on. embed_dim: Size of the vectors into which each set of ngrams are embedded. hidden_width: Width of the dense layer with Relu activation, just before the final prediction (Softmax) layer. dropout: Dropout rate to avoid overfitting. Returns: Thinc :class:`Model`. 
""" with Model.define_operators({">>": chain}): model = ( MultiCharNgramsEmbedding( ns=list(ns), max_chars=1000, lower=True, num_vectors=[2000 * n for n in ns], embed_dims=embed_dim, dropout=dropout, ) >> thinc.layers.Relu( nI=embed_dim * len(ns), nO=hidden_width, dropout=dropout, ) >> thinc.layers.Softmax(nI=hidden_width) ) return model def MultiCharNgramsEmbedding( ns: List[int], max_chars: int, lower: bool, num_vectors: int | List[int], embed_dims: int | List[int], dropout: Optional[float], ) -> Model[List[str], thinc.types.Floats1d]: """ Args: ns max_chars lower num_vectors embed_dims dropout """ numn = len(ns) num_vectors = [num_vectors] * numn if isinstance(num_vectors, int) else num_vectors embed_dims = [embed_dims] * numn if isinstance(embed_dims, int) else embed_dims with Model.define_operators({">>": chain}): model = concatenate( *[ CharNgramsEmbedding( n=n, max_chars=max_chars, lower=lower, num_vectors=nvec, embed_dim=edim, dropout=dropout, ) for n, nvec, edim in zip(ns, num_vectors, embed_dims) ] ) return model def CharNgramsEmbedding( n: int, max_chars: int, lower: bool, num_vectors: int, embed_dim: int, dropout: Optional[float], ) -> Model[List[str], thinc.types.Floats1d]: """ Args: n max_chars lower num_vectors embed_dim dropout """ with Model.define_operators({">>": chain}): model = ( text_to_char_ngrams(n, max_chars, lower) >> thinc.layers.strings2arrays() >> thinc.layers.with_array( thinc.layers.HashEmbed( nO=embed_dim, nV=num_vectors, dropout=dropout, column=0, ) ) >> thinc.layers.list2ragged() >> thinc.layers.reduce_mean() ) return model def text_to_char_ngrams( n: int, max_chars: int, lower: bool, ) -> Model[List[str], List[List[str]]]: """ Custom data type transfer thinc layer that transforms a sequence of text strings into a sequence of sequence of character ngram strings. Like this:: ["a short text.", "another text."] => [["a ", " s", "sh", "ho", ...], ...] Args: n: Number of adjacent characters to combine into an ngram. max_chars: Max number of characters from the start of the text to transform into character ngrams. lower: If True, lowercase text before extracting character ngrams; otherwise, leave text casing as-is. """ def forward( model: Model, texts: List[str], is_train: bool ) -> Tuple[List[List[str]], Callable]: if lower is True: texts = (text[:max_chars].lower() for text in texts) else: texts = (text[:max_chars] for text in texts) if n == 1: char_ngs = [list(text) for text in texts] else: char_ngs = [ [text[i : i + n] for i in range(len(text) - n + 1)] for text in texts ] def backprop(dY): return [] return (char_ngs, backprop) return Model( "texts_to_char_ngrams", forward, attrs={"n": n, "max_chars": max_chars, "lower": lower}, )
# -*- coding: utf-8 -*- from __future__ import division import numpy as np from scipy.linalg import pascal, toeplitz __doc__ = """ See :ref:`Polynomials` for details and examples. .. toctree:: :hidden: tools/polynomial """ class Polynomial(object): r""" Polynomial function and its Abel transform. Supports multiplication and division by numbers. Parameters ---------- r : numpy array *r* values at which the function is generated (and *x* values for its Abel transform); must be non-negative and in ascending order r_min, r_max : float *r* domain: the function is defined as the polynomial on [**r_min**, **r_max**] and zero outside it; 0 ≤ **r_min** < **r_max** ≲ **max r** (**r_max** might exceed maximal **r**, but usually by < 1 pixel) c: numpy array polynomial coefficients in order of increasing degree: [c₀, c₁, c₂] means c₀ + c₁ *r* + c₂ *r*\ ² r_0 : float, optional origin shift: the polynomial is defined as c₀ + c₁ (*r* − **r_0**) + c₂ (*r* − **r_0**)² + ... s : float, optional *r* stretching factor (around **r_0**): the polynomial is defined as c₀ + c₁ (*r*/**s**) + c₂ (*r*/**s**)² + ... reduced : boolean, optional internally rescale the *r* range to [0, 1]; useful to avoid floating-point overflows for high degrees at large r (and might improve numeric accuracy) """ def __init__(self, r, r_min, r_max, c, r_0=0.0, s=1.0, reduced=False): # remove zero high-order terms c = np.array(np.trim_zeros(c, 'b'), float) # if all coefficients are zero if len(c) == 0: # then both func and abel are also zero everywhere self.func = np.zeros_like(r) self.abel = self.func return # polynomial degree K = len(c) - 1 if reduced: # rescale r to [0, 1] (to avoid FP overflow) r = r / r_max r_0 /= r_max s /= r_max abel_scale = r_max r_min /= r_max r_max = 1.0 if s != 1.0: # apply stretch S = np.cumprod([1.0] + [1.0 / s] * K) # powers of 1/s c *= S if r_0 != 0.0: # apply shift P = pascal(1 + K, 'upper', False) # binomial coefficients rk = np.cumprod([1.0] + [-float(r_0)] * K) # powers of -r_0 T = toeplitz([1.0] + [0.0] * K, rk) # upper-diag. (-r_0)^{l - k} c = (P * T).dot(c) # whether even and odd powers are present even = np.any(c[::2]) odd = np.any(c[1::2]) # index limits n = r.shape[0] i_min = np.searchsorted(r, r_min) i_max = np.searchsorted(r, r_max) # Calculate all necessary variables within [0, r_max] # x, x^2 x = r[:i_max] x2 = x * x # integration limits y = sqrt(r^2 - x^2) or 0 def sqrt0(x): return np.sqrt(x, np.zeros_like(x), where=x > 0) y_up = sqrt0(r_max * r_max - x2) y_lo = sqrt0(r_min * r_min - x2) # y r^k |_lo^up # (actually only even are neded for "even", and only odd for "odd") Dyr = np.outer(np.cumprod([1.0] + [r_max] * K), y_up) - \ np.outer(np.cumprod([1.0] + [r_min] * K), y_lo) # ln(r + y) |_lo^up, only for odd k if odd: # ln x for x > 0, otherwise 0 def ln0(x): return np.log(x, np.zeros_like(x), where=x > 0) Dlnry = ln0(r_max + y_up) - \ ln0(np.maximum(r_min, x) + y_lo) # One-sided Abel integral \int_lo^up r^k dy. def a(k): odd_k = k % 2 # max. 
x power K = k - odd_k # (k - 1 for odd k) # generate coefficients for all x^m r^{k - m} terms # (only even indices are actually used; # for odd k, C[K] is also used for x^{k+1} ln(r + y)) C = [0] * (K + 1) C[0] = 1 / (k + 1) for m in range(k, 1, -2): C[k - m + 2] = C[k - m] * m / (m - 1) # sum all terms using Horner's method in x a = C[K] * Dyr[k - K] if odd_k: a += C[K] * x2 * Dlnry for m in range(K - 2, -1, -2): a = a * x2 + C[m] * Dyr[k - m] return a # Generate the polynomial function func = np.zeros(n) span = slice(i_min, i_max) # (using Horner's method) func[span] = c[K] for k in range(K - 1, -1, -1): func[span] = func[span] * x[span] + c[k] self.func = func # Generate its Abel transform abel = np.zeros(n) span = slice(0, i_max) if reduced: c *= abel_scale for k in range(K + 1): if c[k]: abel[span] += c[k] * 2 * a(k) self.abel = abel def __imul__(self, m): """ In-place multiplication: Polynomial *= num. """ self.func *= m self.abel *= m return self def __mul__(self, m): """ Multiplication: Polynomial * num. """ res = self.__new__(type(self)) # create empty object (same type) res.func = self.func * m res.abel = self.abel * m return res __rmul__ = __mul__ __rmul__.__doc__ = \ """ Multiplication: num * Polynomial. """ def __itruediv__(self, m): """ In-place division: Polynomial /= num. """ return self.__imul__(1 / m) def __truediv__(self, m): """ Division: Polynomial / num. """ return self.__mul__(1 / m) # (Addition and subtraction are not implemented because they are # meaningful only for polynomials generated over the same r array. # Use PiecewisePolynomial for sums of polynomials.) class PiecewisePolynomial(Polynomial): r""" Piecewise polynomial function (sum of ``Polynomial``\ s) and its Abel transform. Supports multiplication and division by numbers. Parameters ---------- r : numpy array *r* values at which the function is generated (and *x* values for its Abel transform) ranges : iterable of unpackable (list of tuples of) polynomial parameters for each piece:: [(r_min_1st, r_max_1st, c_1st), (r_min_2nd, r_max_2nd, c_2nd), ... (r_min_nth, r_max_nth, c_nth)] according to ``Polynomial`` conventions. All ranges are independent (may overlap and have gaps, may define polynomials of any degrees) and may include optional ``Polynomial`` parameters """ def __init__(self, r, ranges): self.p = [] for rng in ranges: self.p.append(Polynomial(r, *rng)) func = self.p[0].func for p in self.p[1:]: func += p.func self.func = func abel = self.p[0].abel for p in self.p[1:]: abel += p.abel self.abel = abel # (Multiplication and division methods are inherited from Polynomial.)
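# Usage sketch (illustrative, not part of the original module; it assumes the
# Polynomial class above is in scope).  A quadratic profile is defined on
# r in [0, 5] and both the function and its Abel transform are inspected.
import numpy as np

r = np.linspace(0, 10, 201)                       # radial grid, ascending
p = Polynomial(r, 0.0, 5.0, [1.0, 0.0, -0.04])    # 1 - 0.04 r^2 on [0, 5], zero outside
profile, transform = p.func, p.abel               # both sampled on the same r grid
half = p / 2                                      # scaled copy; p itself is unchanged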
def ___gmres_stop_criterion___(tol, atol, ITER, maxiter, BETA): """ :param tol: relative tolerance. :param atol: absolute tolerance :param ITER: :param maxiter: :param BETA: A list of beta (residual) of some recent iterations. :return: """ assert tol < 0.01, f"tol={tol} too large, should be < 0.01." # noinspection PyUnusedLocal info = 'TBD' beta0 = BETA[0] beta = BETA[-1] judge_1 = beta < atol # judge 1: reach absolute tolerance. judge_2 = ITER >= maxiter # judge 2: reach max iteration number # judge 3: divergence if BETA[-1] > BETA[-2]: # error grows after one iteration if BETA[-2] > 1 and (BETA[-1]-BETA[-2]) > 100 * BETA[-2]: judge_3 = True elif BETA[-1] > 10e6: judge_3 = True elif (BETA[-1]-BETA[-2]) > 100: judge_3 = True else: judge_3 = False else: judge_3 = False # judge 4: reach relative tol. if beta < beta0: progress = beta0 - beta if progress / beta0 < tol: # reach relative tol. judge_4 = True else: judge_4 = False else: judge_4 = False # judge_5: slow converging beta_old = BETA[-2] if beta < beta_old: progress = beta_old - beta if progress / beta_old < tol: # slow converging judge_5 = True else: judge_5 = False else: judge_5 = False # ... if judge_1 or judge_2 or judge_3 or judge_4 or judge_5: stop_iteration = True if judge_1: # reach atol info = 0 JUDGE = 1 JUDGE_explanation = 'reach absolute tol' elif judge_2: # reach maxiter info = ITER JUDGE = 2 JUDGE_explanation = 'reach maxiter' elif judge_3: # diverging info = -1 JUDGE = 3 JUDGE_explanation = 'diverging' elif judge_4: # reach tol info = 0 JUDGE = 4 JUDGE_explanation = 'reach relative tol' elif judge_5: # very slow converging; the progress is lower than the tol info = ITER JUDGE = 5 JUDGE_explanation = 'very slow converging' else: raise Exception() else: # do not stop iterations. stop_iteration = False info = None JUDGE = 0 JUDGE_explanation = '' assert stop_iteration in (True, False), "stop_iteration has to be set." assert info != 'TBD', "info has to be updated" return JUDGE, stop_iteration, info, JUDGE_explanation
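# Usage sketch (illustrative, not part of the original module).  Feed the
# residual history plus the iteration counters to the stopping test and
# unpack the verdict; the numbers below are made up.
BETA = [1.0, 0.4, 0.2]    # residuals so far, oldest first
JUDGE, stop_iteration, info, explanation = ___gmres_stop_criterion___(
    tol=1e-4, atol=1e-10, ITER=2, maxiter=100, BETA=BETA)
# stop_iteration is False here: the residual is still shrinking fast enough,
# so the surrounding GMRES loop would keep iterating.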
from __future__ import print_function import re import pytest from click.testing import CliRunner from dagster.cli.pipeline import execute_scaffold_command, pipeline_scaffold_command from .test_cli_commands import ( valid_external_pipeline_target_args, valid_external_pipeline_target_cli_args, ) def no_print(_): return None @pytest.mark.parametrize('scaffold_args', valid_external_pipeline_target_args()) def test_scaffold_command(scaffold_args): cli_args, uses_legacy_repository_yaml_format = scaffold_args if uses_legacy_repository_yaml_format: with pytest.warns( UserWarning, match=re.escape( 'You are using the legacy repository yaml format. Please update your file ' ), ): cli_args['print_only_required'] = True execute_scaffold_command(cli_args=cli_args, print_fn=no_print) cli_args['print_only_required'] = False execute_scaffold_command(cli_args=cli_args, print_fn=no_print) else: cli_args['print_only_required'] = True execute_scaffold_command(cli_args=cli_args, print_fn=no_print) cli_args['print_only_required'] = False execute_scaffold_command(cli_args=cli_args, print_fn=no_print) @pytest.mark.parametrize('execute_cli_args', valid_external_pipeline_target_cli_args()) def test_scaffold_command_cli(execute_cli_args): cli_args, uses_legacy_repository_yaml_format = execute_cli_args runner = CliRunner() if uses_legacy_repository_yaml_format: with pytest.warns( UserWarning, match=re.escape( 'You are using the legacy repository yaml format. Please update your file ' ), ): result = runner.invoke(pipeline_scaffold_command, cli_args) assert result.exit_code == 0 result = runner.invoke(pipeline_scaffold_command, ['--print-only-required'] + cli_args) assert result.exit_code == 0 else: result = runner.invoke(pipeline_scaffold_command, cli_args) assert result.exit_code == 0 result = runner.invoke(pipeline_scaffold_command, ['--print-only-required'] + cli_args) assert result.exit_code == 0
#!/usr/bin/env python # roulette.py - a russian roulette for phenny import random deaths = ("valt dood.", "ziet er verbaasd uit.", "schiet zichzelf overhoop.", "maakt het meubilair vuil.", "- in Soviet Russia, gun levels you.", "nu als spook!", "gaat naar de eeuwige jachtvelden.", "heeft nu hoofdventilatie.", "scoort een punt.", "weet nu wat geluk hebben is.", "gaat het hoekje om.", "gaat om zeep", "houdt plots op met ademen", "zou nu wel een gaatjesdichtende kurk kunnen gebruiken", "heeft nu een probleem dat niet zomaar even met wat hoestsiroop opgelost kan worden", " - nu met extra lood!") def setup(self): self.roulette={} self.roulette['run']=False def spin(phenny, input): gun = phenny.roulette['gun'] pos = random.randint(0,len(gun)-1) gun = gun[pos:]+gun[:pos] phenny.roulette['gun']=gun def rrload(phenny, input): if phenny.roulette['run']: phenny.say('Eerst leegmaken, dan krijg je nieuwe kogels...') return bullets = 1 chambers = 6 try: params = input.split(" ") bullets = int(params[1]) chambers = int(params[2]) except: pass chambers = max(2,min(chambers,100)) bullets = max(1,min(bullets,100)) if bullets > chambers: bullets = chambers gun = [False]*chambers for bullet in range(0,bullets): gun[bullet]=True phenny.roulette['gun']=gun spin(phenny, input) phenny.roulette['run']=True strbul = str(bullets) + ((bullets == 1) and " kogel" or " kogels") strcha = str(chambers) + ((chambers == 1) and " kamer" or " kamers") phenny.say("Hier is een revolver met "+strbul+" in "+strcha+". Do you feel lucky, punk? Huh? Do you?") rrload.commands=["load"] rrload.thread=False def rrspin(phenny, input): if phenny.roulette['run']: spin(phenny, input) phenny.say("RRRRR... ["+input.nick + " draait aan de cylinder.] ...kaCHINK!") rrspin.commands=["spin"] rrspin.thread=False def rrclick(phenny, input): if phenny.roulette['run']: gun = phenny.roulette['gun'] next = gun[0] if next: phenny.say("BLAM! "+input.nick+" "+random.choice(deaths)) phenny.roulette['run']=False else: phenny.say("Klik. Er gebeurt niets.") gun = gun[1:]+gun[:1] phenny.roulette['gun']=gun else: phenny.say("Er gebeurt niets.") phenny.say("Omdat de revolver niet geladen is, duhh!") rrclick.commands=["pull"] rrclick.thread=False
from Qt import QtWidgets, QtCore DEFAULT_COLOR = "#fb9c15" class View(QtWidgets.QTreeView): data_changed = QtCore.Signal() def __init__(self, parent=None): super(View, self).__init__(parent=parent) # view settings self.setAlternatingRowColors(False) self.setSortingEnabled(True) self.setSelectionMode(self.ExtendedSelection) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) def get_indices(self): """Get the selected rows""" selection_model = self.selectionModel() return selection_model.selectedRows() def extend_to_children(self, indices): """Extend the indices to the children indices. Top-level indices are extended to its children indices. Sub-items are kept as is. :param indices: The indices to extend. :type indices: list :return: The children indices :rtype: list """ subitems = set() for i in indices: valid_parent = i.parent().isValid() if valid_parent and i not in subitems: subitems.add(i) else: # is top level node model = i.model() rows = model.rowCount(parent=i) for row in range(rows): child = model.index(row, 0, parent=i) subitems.add(child) return list(subitems)
#!/usr/bin/env python import base64 import random from flask import Flask, render_template, request from flags import flags from create_hearts import get_hearts_svg app = Flask(__name__) @app.template_filter("base64") def encode_as_base64(xml_string): return base64.b64encode(xml_string.encode("ascii")).decode("ascii") @app.template_filter("name") def get_name(flag_pair): try: return flag_pair[1]["name"] except KeyError: return flag_pair[0] @app.template_filter("url") def get_url(flag_pair): return flag_pair[1]["url"] RENAMED_FLAGS = { # Renamed after I realised the polyamory and polysexual flags are separate. # See https://github.com/queerjs/website/issues/59 "poly": "polysexual", } @app.route("/") def index(): selected_flags = { "left_flag": random.choice(list(flags.items())), "right_flag": random.choice(list(flags.items())), } for label in selected_flags: try: flag_name = request.args[label] flag_name = RENAMED_FLAGS.get(flag_name, flag_name) flag_data = flags[flag_name] except KeyError: pass else: selected_flags[label] = (flag_name, flag_data) svg_xml = get_hearts_svg(**selected_flags) return render_template("index.html", svg_xml=svg_xml, flags=selected_flags) if __name__ == "__main__": app.run(debug=True)
from find_successor_in_bst import find_successor_of_node_in_bst, Node import unittest class Test_Case_Find_Successor_In_Bst(unittest.TestCase): def test_find_successor_in_bst(self): root = build_binary_search_tree([1,2,3,4,5,6,7]) three_node = root.left_child.right_child ans = find_successor_of_node_in_bst(three_node) self.assertEqual(ans, root) def build_binary_search_tree(arr): if len(arr) == 1: return Node(arr[0]) mid = len(arr) // 2 node = Node(arr[mid]) node.left_child = build_binary_search_tree(arr[0:mid]) node.left_child.parent = node if len(arr) > 2: node.right_child = build_binary_search_tree(arr[mid+1:]) node.right_child.parent = node return node
""" Given an input string, reverse the string word by word. For example, Given s = "the sky is blue", return "blue is sky the". For C programmers: Try to solve it in-place in O(1) space. Clarification: * What constitutes a word? A sequence of non-space characters constitutes a word. * Could the input string contain leading or trailing spaces? Yes. However, your reversed string should not contain leading or trailing spaces. * How about multiple spaces between two words? Reduce them to a single space in the reversed string. https://leetcode.com/problems/reverse-words-in-a-string/ """ class Solution: # @param s, a string # @return a string def reverseWords(self, s): elements = s.split(" ") elements = [x for x in elements if x != ""] elements = elements[::-1] return " ".join(elements)
from copy import copy from typing import Union from hdlConvertorAst.hdlAst._defs import HdlIdDef from hdlConvertorAst.hdlAst._expr import HdlValueInt, HdlOp, HdlOpType, \ HdlValueId from hdlConvertorAst.translate.verilog_to_basic_hdl_sim_model.utils import hdl_getattr, \ hdl_call from hdlConvertorAst.translate.common.name_scope import ObjectForNameNotFound from hwt.hdl.types.arrayVal import HArrayVal from hwt.hdl.types.bitsVal import BitsVal from hwt.hdl.types.defs import SLICE from hwt.hdl.types.enum import HEnum from hwt.hdl.types.enumVal import HEnumVal from hwt.hdl.types.sliceVal import HSliceVal from hwt.hdl.value import HValue from hwt.hdl.variables import SignalItem from hwt.serializer.generic.value import ToHdlAst_Value from hwt.serializer.simModel.value import ToHdlAstSimModel_value class ToHdlAstHwt_value(ToHdlAst_Value): NONE = HdlValueId("None") SLICE = HdlValueId("SLICE", obj=SLICE) def is_suitable_for_const_extract(self, val: HValue): # full valid values can be represented as int and do not have any # constructor overhead, entirely invalid values can be represented by None return not val._is_full_valid() and not isinstance(val._dtype, HEnum) def as_hdl_BitsVal(self, val: BitsVal): isFullVld = val._is_full_valid() if not self._valueWidthRequired: if isFullVld: return HdlValueInt(val.val, None, 16) elif val.vld_mask == 0: return self.NONE t = self.as_hdl_HdlType_bits(val._dtype, declaration=False) c = hdl_getattr(t, "from_py") args = [HdlValueInt(val.val, None, 16), ] if not isFullVld: args.append(HdlValueInt(val.vld_mask, None, 16)) return hdl_call(c, args) def as_hdl_SignalItem(self, si: Union[SignalItem, HdlIdDef], declaration=False): if declaration: if isinstance(si, HdlIdDef): new_si = copy(si) new_si.type = self.as_hdl_HdlType(si.type) if si.value is not None: new_si.value = self.as_hdl_Value(si.value) return new_si else: raise NotImplementedError() else: # if isinstance(si, SignalItem) and si._const: # # to allow const cache to extract constants # return self.as_hdl_Value(si._val) if si.hidden and si.origin is not None: return self.as_hdl(si.origin) else: return HdlValueId(si.name, obj=si) def as_hdl_DictVal(self, val): return ToHdlAstSimModel_value.as_hdl_DictVal(self, val) def as_hdl_HArrayVal(self, val: HArrayVal): if not val.vld_mask: return self.NONE # else: # if len(val.val) == val._dtype.size: # allValuesSame = True # values = iter(val.val.values()) # reference = next(values) # for v in values: # if allValuesSame: # allValuesSame = isSameHVal(reference, v) # else: # break # if allValuesSame: # # all values of items in array are same, use generator # # exression # raise NotImplementedError() # return "[%s for _ in range(%d)]" % (self.Value(reference)) # if value can not be simplified it is required to serialize it item # by item return self.as_hdl_DictVal(val.val) def as_hdl_HSliceVal(self, val: HSliceVal): if val._is_full_valid(): return HdlOp( HdlOpType.DOWNTO, [ HdlValueInt(int(val.val.start), None, None), HdlValueInt(int(val.val.stop), None, None) ]) else: raise NotImplementedError() return "HSliceVal(slice(%s, %s, %s), SLICE, %d)" % ( self.as_hdl_Value(val.val.start), self.as_hdl_Value(val.val.stop), self.as_hdl_Value(val.val.step), val.vld_mask) def as_hdl_HEnumVal(self, val: HEnumVal): try: t_name = self.name_scope.get_object_name(val._dtype) except ObjectForNameNotFound: if self.debug: t_name = val._dtype.name else: raise if val.vld_mask: try: name = self.name_scope.get_object_name(val) except ObjectForNameNotFound: if self.debug: name = val.val else: raise 
return hdl_getattr(HdlValueId(t_name, obj=val._dtype), name) else: return hdl_call(hdl_getattr(HdlValueId(t_name, obj=val._dtype), "from_py"), [None, ])
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'GUI.ui' # # Created by: PyQt5 UI code generator 5.6 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Dialog(object): def setupUi(self, Dialog): Dialog.setObjectName("Dialog") Dialog.resize(861, 431) Dialog.setAutoFillBackground(False) self.groupBox = QtWidgets.QGroupBox(Dialog) self.groupBox.setGeometry(QtCore.QRect(10, 10, 531, 171)) font = QtGui.QFont() font.setPointSize(10) font.setBold(True) font.setWeight(75) self.groupBox.setFont(font) self.groupBox.setObjectName("groupBox") self.pushButton_getdata = QtWidgets.QPushButton(self.groupBox) self.pushButton_getdata.setGeometry(QtCore.QRect(10, 20, 211, 31)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.pushButton_getdata.setFont(font) self.pushButton_getdata.setObjectName("pushButton_getdata") self.label_9 = QtWidgets.QLabel(self.groupBox) self.label_9.setGeometry(QtCore.QRect(60, 100, 81, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_9.setFont(font) self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_9.setObjectName("label_9") self.textEdit_filepath = QtWidgets.QTextEdit(self.groupBox) self.textEdit_filepath.setEnabled(False) self.textEdit_filepath.setGeometry(QtCore.QRect(230, 30, 291, 61)) font = QtGui.QFont() font.setPointSize(8) font.setBold(False) font.setWeight(50) self.textEdit_filepath.setFont(font) self.textEdit_filepath.setObjectName("textEdit_filepath") self.label_11 = QtWidgets.QLabel(self.groupBox) self.label_11.setGeometry(QtCore.QRect(230, 10, 101, 16)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.label_11.setFont(font) self.label_11.setObjectName("label_11") self.doubleSpinBox_tracklength = QtWidgets.QDoubleSpinBox(self.groupBox) self.doubleSpinBox_tracklength.setEnabled(False) self.doubleSpinBox_tracklength.setGeometry(QtCore.QRect(150, 100, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_tracklength.setFont(font) self.doubleSpinBox_tracklength.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_tracklength.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_tracklength.setSpecialValueText("") self.doubleSpinBox_tracklength.setDecimals(4) self.doubleSpinBox_tracklength.setMaximum(1000.0) self.doubleSpinBox_tracklength.setSingleStep(0.0) self.doubleSpinBox_tracklength.setObjectName("doubleSpinBox_tracklength") self.pushButton_graphtrack = QtWidgets.QPushButton(self.groupBox) self.pushButton_graphtrack.setGeometry(QtCore.QRect(10, 60, 211, 31)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.pushButton_graphtrack.setFont(font) self.pushButton_graphtrack.setObjectName("pushButton_graphtrack") self.doubleSpinBox_resolution = QtWidgets.QDoubleSpinBox(self.groupBox) self.doubleSpinBox_resolution.setEnabled(False) self.doubleSpinBox_resolution.setGeometry(QtCore.QRect(420, 100, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_resolution.setFont(font) self.doubleSpinBox_resolution.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_resolution.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_resolution.setSpecialValueText("") self.doubleSpinBox_resolution.setDecimals(5) self.doubleSpinBox_resolution.setMaximum(10.0) self.doubleSpinBox_resolution.setSingleStep(0.0) 
self.doubleSpinBox_resolution.setObjectName("doubleSpinBox_resolution") self.label_16 = QtWidgets.QLabel(self.groupBox) self.label_16.setGeometry(QtCore.QRect(310, 100, 101, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_16.setFont(font) self.label_16.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_16.setObjectName("label_16") self.label_20 = QtWidgets.QLabel(self.groupBox) self.label_20.setGeometry(QtCore.QRect(250, 140, 161, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_20.setFont(font) self.label_20.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_20.setObjectName("label_20") self.doubleSpinBox_datafreq = QtWidgets.QDoubleSpinBox(self.groupBox) self.doubleSpinBox_datafreq.setEnabled(False) self.doubleSpinBox_datafreq.setGeometry(QtCore.QRect(420, 140, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_datafreq.setFont(font) self.doubleSpinBox_datafreq.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_datafreq.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_datafreq.setSpecialValueText("") self.doubleSpinBox_datafreq.setDecimals(1) self.doubleSpinBox_datafreq.setMaximum(10000.0) self.doubleSpinBox_datafreq.setSingleStep(0.0) self.doubleSpinBox_datafreq.setObjectName("doubleSpinBox_datafreq") self.label_21 = QtWidgets.QLabel(self.groupBox) self.label_21.setGeometry(QtCore.QRect(10, 140, 131, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_21.setFont(font) self.label_21.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_21.setObjectName("label_21") self.spinBox_dataN = QtWidgets.QSpinBox(self.groupBox) self.spinBox_dataN.setEnabled(False) self.spinBox_dataN.setGeometry(QtCore.QRect(150, 140, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) font.setKerning(True) self.spinBox_dataN.setFont(font) self.spinBox_dataN.setAlignment(QtCore.Qt.AlignCenter) self.spinBox_dataN.setReadOnly(False) self.spinBox_dataN.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.spinBox_dataN.setMinimum(0) self.spinBox_dataN.setMaximum(100000) self.spinBox_dataN.setObjectName("spinBox_dataN") self.groupBox_2 = QtWidgets.QGroupBox(Dialog) self.groupBox_2.setGeometry(QtCore.QRect(270, 190, 271, 141)) font = QtGui.QFont() font.setPointSize(10) font.setBold(True) font.setWeight(75) self.groupBox_2.setFont(font) self.groupBox_2.setObjectName("groupBox_2") self.label_8 = QtWidgets.QLabel(self.groupBox_2) self.label_8.setGeometry(QtCore.QRect(-10, 20, 161, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_8.setFont(font) self.label_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_8.setObjectName("label_8") self.label_6 = QtWidgets.QLabel(self.groupBox_2) self.label_6.setGeometry(QtCore.QRect(30, 80, 121, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_6.setFont(font) self.label_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_6.setObjectName("label_6") self.label_5 = QtWidgets.QLabel(self.groupBox_2) self.label_5.setGeometry(QtCore.QRect(10, 50, 141, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) 
self.label_5.setFont(font) self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_5.setObjectName("label_5") self.label_7 = QtWidgets.QLabel(self.groupBox_2) self.label_7.setGeometry(QtCore.QRect(60, 110, 91, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_7.setFont(font) self.label_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_7.setObjectName("label_7") self.spinBox_dampingfac = QtWidgets.QSpinBox(self.groupBox_2) self.spinBox_dampingfac.setGeometry(QtCore.QRect(160, 110, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.spinBox_dampingfac.setFont(font) self.spinBox_dampingfac.setAlignment(QtCore.Qt.AlignCenter) self.spinBox_dampingfac.setMinimum(1) self.spinBox_dampingfac.setObjectName("spinBox_dampingfac") self.doubleSpinBox_shockdisp = QtWidgets.QDoubleSpinBox(self.groupBox_2) self.doubleSpinBox_shockdisp.setEnabled(True) self.doubleSpinBox_shockdisp.setGeometry(QtCore.QRect(160, 20, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_shockdisp.setFont(font) self.doubleSpinBox_shockdisp.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_shockdisp.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_shockdisp.setSpecialValueText("") self.doubleSpinBox_shockdisp.setDecimals(4) self.doubleSpinBox_shockdisp.setMinimum(0.01) self.doubleSpinBox_shockdisp.setMaximum(10.0) self.doubleSpinBox_shockdisp.setSingleStep(0.0) self.doubleSpinBox_shockdisp.setObjectName("doubleSpinBox_shockdisp") self.doubleSpinBox_shockspring = QtWidgets.QDoubleSpinBox(self.groupBox_2) self.doubleSpinBox_shockspring.setGeometry(QtCore.QRect(160, 50, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_shockspring.setFont(font) self.doubleSpinBox_shockspring.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_shockspring.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_shockspring.setSpecialValueText("") self.doubleSpinBox_shockspring.setDecimals(4) self.doubleSpinBox_shockspring.setMaximum(10000.0) self.doubleSpinBox_shockspring.setSingleStep(0.0) self.doubleSpinBox_shockspring.setObjectName("doubleSpinBox_shockspring") self.doubleSpinBox_tirespring = QtWidgets.QDoubleSpinBox(self.groupBox_2) self.doubleSpinBox_tirespring.setGeometry(QtCore.QRect(160, 80, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_tirespring.setFont(font) self.doubleSpinBox_tirespring.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_tirespring.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_tirespring.setSpecialValueText("") self.doubleSpinBox_tirespring.setDecimals(4) self.doubleSpinBox_tirespring.setMaximum(10000.0) self.doubleSpinBox_tirespring.setSingleStep(0.0) self.doubleSpinBox_tirespring.setObjectName("doubleSpinBox_tirespring") self.groupBox_3 = QtWidgets.QGroupBox(Dialog) self.groupBox_3.setGeometry(QtCore.QRect(10, 190, 251, 141)) font = QtGui.QFont() font.setPointSize(10) font.setBold(True) font.setWeight(75) self.groupBox_3.setFont(font) self.groupBox_3.setObjectName("groupBox_3") self.label = QtWidgets.QLabel(self.groupBox_3) self.label.setGeometry(QtCore.QRect(50, 20, 81, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label.setFont(font) 
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label.setObjectName("label") self.label_2 = QtWidgets.QLabel(self.groupBox_3) self.label_2.setGeometry(QtCore.QRect(40, 50, 91, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_2.setFont(font) self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_2.setObjectName("label_2") self.label_3 = QtWidgets.QLabel(self.groupBox_3) self.label_3.setGeometry(QtCore.QRect(10, 80, 121, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_3.setFont(font) self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_3.setObjectName("label_3") self.label_4 = QtWidgets.QLabel(self.groupBox_3) self.label_4.setGeometry(QtCore.QRect(40, 110, 91, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_4.setFont(font) self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_4.setObjectName("label_4") self.doubleSpinBox_bodyweight = QtWidgets.QDoubleSpinBox(self.groupBox_3) self.doubleSpinBox_bodyweight.setGeometry(QtCore.QRect(140, 20, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_bodyweight.setFont(font) self.doubleSpinBox_bodyweight.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_bodyweight.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_bodyweight.setSpecialValueText("") self.doubleSpinBox_bodyweight.setDecimals(4) self.doubleSpinBox_bodyweight.setMaximum(1000.0) self.doubleSpinBox_bodyweight.setSingleStep(0.0) self.doubleSpinBox_bodyweight.setObjectName("doubleSpinBox_bodyweight") self.doubleSpinBox_CG = QtWidgets.QDoubleSpinBox(self.groupBox_3) self.doubleSpinBox_CG.setEnabled(False) self.doubleSpinBox_CG.setGeometry(QtCore.QRect(140, 50, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_CG.setFont(font) self.doubleSpinBox_CG.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_CG.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_CG.setSpecialValueText("") self.doubleSpinBox_CG.setDecimals(4) self.doubleSpinBox_CG.setMaximum(1.0) self.doubleSpinBox_CG.setSingleStep(0.0) self.doubleSpinBox_CG.setObjectName("doubleSpinBox_CG") self.doubleSpinBox_wheelweight = QtWidgets.QDoubleSpinBox(self.groupBox_3) self.doubleSpinBox_wheelweight.setGeometry(QtCore.QRect(140, 80, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_wheelweight.setFont(font) self.doubleSpinBox_wheelweight.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_wheelweight.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_wheelweight.setSpecialValueText("") self.doubleSpinBox_wheelweight.setDecimals(4) self.doubleSpinBox_wheelweight.setMaximum(1000.0) self.doubleSpinBox_wheelweight.setSingleStep(0.0) self.doubleSpinBox_wheelweight.setObjectName("doubleSpinBox_wheelweight") self.doubleSpinBox_tireradius = QtWidgets.QDoubleSpinBox(self.groupBox_3) self.doubleSpinBox_tireradius.setGeometry(QtCore.QRect(140, 110, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_tireradius.setFont(font) self.doubleSpinBox_tireradius.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_tireradius.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) 
self.doubleSpinBox_tireradius.setSpecialValueText("") self.doubleSpinBox_tireradius.setDecimals(4) self.doubleSpinBox_tireradius.setMaximum(10.0) self.doubleSpinBox_tireradius.setSingleStep(0.0) self.doubleSpinBox_tireradius.setObjectName("doubleSpinBox_tireradius") self.groupBox_4 = QtWidgets.QGroupBox(Dialog) self.groupBox_4.setGeometry(QtCore.QRect(10, 340, 251, 81)) font = QtGui.QFont() font.setPointSize(10) font.setBold(True) font.setWeight(75) self.groupBox_4.setFont(font) self.groupBox_4.setObjectName("groupBox_4") self.label_10 = QtWidgets.QLabel(self.groupBox_4) self.label_10.setGeometry(QtCore.QRect(20, 20, 111, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_10.setFont(font) self.label_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_10.setObjectName("label_10") self.label_12 = QtWidgets.QLabel(self.groupBox_4) self.label_12.setGeometry(QtCore.QRect(0, 50, 131, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_12.setFont(font) self.label_12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_12.setObjectName("label_12") self.spinBox_wishboneN = QtWidgets.QSpinBox(self.groupBox_4) self.spinBox_wishboneN.setGeometry(QtCore.QRect(140, 50, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.spinBox_wishboneN.setFont(font) self.spinBox_wishboneN.setAlignment(QtCore.Qt.AlignCenter) self.spinBox_wishboneN.setMinimum(1) self.spinBox_wishboneN.setMaximum(10) self.spinBox_wishboneN.setObjectName("spinBox_wishboneN") self.doubleSpinBox_wblength = QtWidgets.QDoubleSpinBox(self.groupBox_4) self.doubleSpinBox_wblength.setGeometry(QtCore.QRect(140, 20, 101, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_wblength.setFont(font) self.doubleSpinBox_wblength.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_wblength.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_wblength.setSpecialValueText("") self.doubleSpinBox_wblength.setDecimals(4) self.doubleSpinBox_wblength.setMaximum(10.0) self.doubleSpinBox_wblength.setSingleStep(0.0) self.doubleSpinBox_wblength.setObjectName("doubleSpinBox_wblength") self.groupBox_5 = QtWidgets.QGroupBox(Dialog) self.groupBox_5.setGeometry(QtCore.QRect(270, 340, 271, 81)) font = QtGui.QFont() font.setPointSize(10) font.setBold(True) font.setWeight(75) self.groupBox_5.setFont(font) self.groupBox_5.setObjectName("groupBox_5") self.label_13 = QtWidgets.QLabel(self.groupBox_5) self.label_13.setGeometry(QtCore.QRect(40, 20, 101, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_13.setFont(font) self.label_13.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_13.setObjectName("label_13") self.label_14 = QtWidgets.QLabel(self.groupBox_5) self.label_14.setGeometry(QtCore.QRect(50, 50, 91, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_14.setFont(font) self.label_14.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_14.setObjectName("label_14") self.doubleSpinBox_initXvel = QtWidgets.QDoubleSpinBox(self.groupBox_5) self.doubleSpinBox_initXvel.setGeometry(QtCore.QRect(150, 20, 111, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_initXvel.setFont(font) 
self.doubleSpinBox_initXvel.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_initXvel.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_initXvel.setSpecialValueText("") self.doubleSpinBox_initXvel.setDecimals(4) self.doubleSpinBox_initXvel.setMaximum(1000.0) self.doubleSpinBox_initXvel.setSingleStep(0.0) self.doubleSpinBox_initXvel.setObjectName("doubleSpinBox_initXvel") self.doubleSpinBox_initYvel = QtWidgets.QDoubleSpinBox(self.groupBox_5) self.doubleSpinBox_initYvel.setEnabled(False) self.doubleSpinBox_initYvel.setGeometry(QtCore.QRect(150, 50, 111, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_initYvel.setFont(font) self.doubleSpinBox_initYvel.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_initYvel.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_initYvel.setSpecialValueText("") self.doubleSpinBox_initYvel.setDecimals(4) self.doubleSpinBox_initYvel.setMaximum(1000.0) self.doubleSpinBox_initYvel.setSingleStep(0.0) self.doubleSpinBox_initYvel.setObjectName("doubleSpinBox_initYvel") self.groupBox_6 = QtWidgets.QGroupBox(Dialog) self.groupBox_6.setGeometry(QtCore.QRect(550, 10, 301, 371)) font = QtGui.QFont() font.setPointSize(10) font.setBold(True) font.setWeight(75) self.groupBox_6.setFont(font) self.groupBox_6.setObjectName("groupBox_6") self.pushButton_solve = QtWidgets.QPushButton(self.groupBox_6) self.pushButton_solve.setGeometry(QtCore.QRect(10, 20, 281, 31)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.pushButton_solve.setFont(font) self.pushButton_solve.setObjectName("pushButton_solve") self.label_15 = QtWidgets.QLabel(self.groupBox_6) self.label_15.setGeometry(QtCore.QRect(80, 60, 91, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_15.setFont(font) self.label_15.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_15.setObjectName("label_15") self.doubleSpinBox_sag = QtWidgets.QDoubleSpinBox(self.groupBox_6) self.doubleSpinBox_sag.setEnabled(False) self.doubleSpinBox_sag.setGeometry(QtCore.QRect(180, 60, 111, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_sag.setFont(font) self.doubleSpinBox_sag.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_sag.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_sag.setSpecialValueText("") self.doubleSpinBox_sag.setDecimals(1) self.doubleSpinBox_sag.setMaximum(1000.0) self.doubleSpinBox_sag.setSingleStep(0.0) self.doubleSpinBox_sag.setObjectName("doubleSpinBox_sag") self.label_17 = QtWidgets.QLabel(self.groupBox_6) self.label_17.setGeometry(QtCore.QRect(40, 90, 131, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_17.setFont(font) self.label_17.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_17.setObjectName("label_17") self.doubleSpinBox_maxsag = QtWidgets.QDoubleSpinBox(self.groupBox_6) self.doubleSpinBox_maxsag.setEnabled(False) self.doubleSpinBox_maxsag.setGeometry(QtCore.QRect(180, 90, 111, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_maxsag.setFont(font) self.doubleSpinBox_maxsag.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_maxsag.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_maxsag.setSpecialValueText("") self.doubleSpinBox_maxsag.setDecimals(1) self.doubleSpinBox_maxsag.setMaximum(1000.0) 
self.doubleSpinBox_maxsag.setSingleStep(0.0) self.doubleSpinBox_maxsag.setObjectName("doubleSpinBox_maxsag") self.label_18 = QtWidgets.QLabel(self.groupBox_6) self.label_18.setGeometry(QtCore.QRect(10, 190, 101, 16)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.label_18.setFont(font) self.label_18.setObjectName("label_18") self.textEdit_output = QtWidgets.QTextEdit(self.groupBox_6) self.textEdit_output.setEnabled(False) self.textEdit_output.setGeometry(QtCore.QRect(10, 210, 281, 151)) font = QtGui.QFont() font.setPointSize(8) font.setBold(False) font.setWeight(50) self.textEdit_output.setFont(font) self.textEdit_output.setObjectName("textEdit_output") self.pushButton_optimizespring = QtWidgets.QPushButton(self.groupBox_6) self.pushButton_optimizespring.setGeometry(QtCore.QRect(30, 120, 241, 31)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.pushButton_optimizespring.setFont(font) self.pushButton_optimizespring.setObjectName("pushButton_optimizespring") self.doubleSpinBox_shockspringoptimized = QtWidgets.QDoubleSpinBox(self.groupBox_6) self.doubleSpinBox_shockspringoptimized.setEnabled(False) self.doubleSpinBox_shockspringoptimized.setGeometry(QtCore.QRect(180, 160, 111, 22)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.doubleSpinBox_shockspringoptimized.setFont(font) self.doubleSpinBox_shockspringoptimized.setAlignment(QtCore.Qt.AlignCenter) self.doubleSpinBox_shockspringoptimized.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons) self.doubleSpinBox_shockspringoptimized.setSpecialValueText("") self.doubleSpinBox_shockspringoptimized.setDecimals(4) self.doubleSpinBox_shockspringoptimized.setMaximum(10000.0) self.doubleSpinBox_shockspringoptimized.setSingleStep(0.0) self.doubleSpinBox_shockspringoptimized.setObjectName("doubleSpinBox_shockspringoptimized") self.label_19 = QtWidgets.QLabel(self.groupBox_6) self.label_19.setGeometry(QtCore.QRect(10, 160, 161, 21)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_19.setFont(font) self.label_19.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_19.setObjectName("label_19") self.pushButton_exit = QtWidgets.QPushButton(Dialog) self.pushButton_exit.setGeometry(QtCore.QRect(710, 390, 141, 31)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.pushButton_exit.setFont(font) self.pushButton_exit.setObjectName("pushButton_exit") self.pushButton_clear = QtWidgets.QPushButton(Dialog) self.pushButton_clear.setGeometry(QtCore.QRect(550, 390, 141, 31)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.pushButton_clear.setFont(font) self.pushButton_clear.setObjectName("pushButton_clear") self.retranslateUi(Dialog) QtCore.QMetaObject.connectSlotsByName(Dialog) def retranslateUi(self, Dialog): _translate = QtCore.QCoreApplication.translate Dialog.setWindowTitle(_translate("Dialog", "Dialog")) self.groupBox.setTitle(_translate("Dialog", "Ground Data")) self.pushButton_getdata.setText(_translate("Dialog", "Load Ground Coords")) self.label_9.setText(_translate("Dialog", "Track Length")) self.label_11.setText(_translate("Dialog", "Filepath Chosen")) self.doubleSpinBox_tracklength.setSuffix(_translate("Dialog", " m")) self.pushButton_graphtrack.setText(_translate("Dialog", "Graph Chosen Track")) self.doubleSpinBox_resolution.setSuffix(_translate("Dialog", " m")) self.label_16.setText(_translate("Dialog", "X-axis Resolution")) self.label_20.setText(_translate("Dialog", "Data Recording 
Frequency")) self.doubleSpinBox_datafreq.setSuffix(_translate("Dialog", " Hz")) self.label_21.setText(_translate("Dialog", "Number of Data Points")) self.groupBox_2.setTitle(_translate("Dialog", "Suspension Data")) self.label_8.setText(_translate("Dialog", "Max Shock Displacement")) self.label_6.setText(_translate("Dialog", "Tire Stiffness")) self.label_5.setText(_translate("Dialog", "Shock Springrate")) self.label_7.setText(_translate("Dialog", "Shock Damping Factor")) self.doubleSpinBox_shockdisp.setSuffix(_translate("Dialog", " m")) self.doubleSpinBox_shockspring.setSuffix(_translate("Dialog", " N m")) self.doubleSpinBox_tirespring.setSuffix(_translate("Dialog", " N m")) self.groupBox_3.setTitle(_translate("Dialog", "Vehicle Data")) self.label.setText(_translate("Dialog", "Body Weight")) self.label_2.setText(_translate("Dialog", "CG Location")) self.label_3.setText(_translate("Dialog", "Wheel & Tire Weight")) self.label_4.setText(_translate("Dialog", "Tire Radius")) self.doubleSpinBox_bodyweight.setSuffix(_translate("Dialog", " kg")) self.doubleSpinBox_CG.setSuffix(_translate("Dialog", " m")) self.doubleSpinBox_wheelweight.setSuffix(_translate("Dialog", " kg")) self.doubleSpinBox_tireradius.setSuffix(_translate("Dialog", " m")) self.groupBox_4.setTitle(_translate("Dialog", "Wishbone Data")) self.label_10.setText(_translate("Dialog", "Wishbone Length")) self.label_12.setText(_translate("Dialog", "Number of Wishbones")) self.doubleSpinBox_wblength.setSuffix(_translate("Dialog", " m")) self.groupBox_5.setTitle(_translate("Dialog", "Initial Conditions")) self.label_13.setText(_translate("Dialog", "Velocity in the X")) self.label_14.setText(_translate("Dialog", "Velocity in the Y")) self.doubleSpinBox_initXvel.setSuffix(_translate("Dialog", " m/s")) self.doubleSpinBox_initYvel.setSuffix(_translate("Dialog", " m/s")) self.groupBox_6.setTitle(_translate("Dialog", "Calculated Data")) self.pushButton_solve.setText(_translate("Dialog", "Solve")) self.label_15.setText(_translate("Dialog", "Suspension Sag")) self.doubleSpinBox_sag.setSuffix(_translate("Dialog", " %")) self.label_17.setText(_translate("Dialog", "Total Suspension Sag")) self.doubleSpinBox_maxsag.setSuffix(_translate("Dialog", " m")) self.label_18.setText(_translate("Dialog", "Output")) self.pushButton_optimizespring.setText(_translate("Dialog", "Optimize Shock Springrate for Ideal Sag")) self.doubleSpinBox_shockspringoptimized.setSuffix(_translate("Dialog", " N m")) self.label_19.setText(_translate("Dialog", "Optimized Shock Springrate")) self.pushButton_exit.setText(_translate("Dialog", "Exit")) self.pushButton_clear.setText(_translate("Dialog", "Clear")) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) Dialog = QtWidgets.QDialog() ui = Ui_Dialog() ui.setupUi(Dialog) Dialog.show() sys.exit(app.exec_())
# %matplotlib inline  # magic (only needed when running inside a Jupyter notebook)
import pandas as pd
import matplotlib.pyplot as plt
import click

# dir stuff (development notes)
# import os
# os.getcwd()
# os.chdir(r'C:\Users\lknogler\Desktop\python\nick\Day1')
# filename = 'oktoberfestgesamt19852016.csv'


@click.group()
def cli():
    """Can display and plot csv files"""
    pass


@cli.command()
@click.argument('filename')
def display(filename):
    """Displays column names and data types"""
    df = pd.read_csv(filename)
    # df.head()  # first 5 rows
    print(df.dtypes)


@cli.command()
@click.argument('filename')
@click.option('--column', default=None,
              help='Name of column to plot, passed as --column column_name')
def plot(filename, column):
    """Plots a histogram of one or all column(s) of the csv file"""
    df = pd.read_csv(filename)
    if column is None:
        df.hist()
    else:
        df[column].hist()
    # plt.show() blocks until the plot window is closed; code after it only runs
    # once the window has been dismissed.
    plt.show()


# called in terminal as: python csv_parser.py command filename --column column_name
if __name__ == '__main__':
    cli()
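# Usage sketch (illustrative addition, not part of the original script): the same
# commands can be exercised without a shell via click's test runner. The file name
# 'example.csv' and the column name 'beer_consumption' are placeholders.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, ['display', 'example.csv'])   # prints column dtypes
print(result.output)
runner.invoke(cli, ['plot', 'example.csv', '--column', 'beer_consumption'])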
PICS_OF_CATS_DATASET = { "id": "1", "title": "This Study is about Pictures of Cats", "author": "Peter Bull", "description": "In this study we prove there can be pictures of cats.", } ATOM_DATASET = "../resources/atom-entry-study.xml"
######################################################################################################################## __doc__ = \ """ This add the oddity fixing functionality. Generally odd corner cases. Formerly part of collapse_ring.py """ ######################################################################################################################## from rdkit import Chem import itertools from typing import List, Tuple, Union from ._base import _RectifierBase # when hits are combined they can result in odd valence and other issues. class _RectifierValence(_RectifierBase): """ Checks whether the valence is right and corrects by either shifting the element or adding a charge. With the following exceptions: * Texas carbon -> Sulfur * Hydrogen -> Fluoride shifted downwards * Carbon in aromatic ring -> single bond ring """ # ========= Three main steps ======================================================================================= def ununspecified_bonds(self) -> None: """ This get run during instantiation. It is the first one run. BondType.UNSPECIFIED ==> BondType.SINGLE :return: None """ for bond in self.rwmol.GetBonds(): if bond.GetBondType().name == 'UNSPECIFIED': self.log.debug(f'Fixing unspecified bond {bond.GetIdx()}') bond.SetBondType(Chem.BondType.SINGLE) # debug: self.modifications.append(self.mol) def triage_rings(self) -> None: """ This get run during instantiation. It is the second one run. It is also run again by `fix_issues` the third step after a weird valence change. Deals with rings. :return: None """ # upgrade to aromatic if aromatic. rings = self._get_ring_info() for ring in rings: if self._is_aromatic_ring(ring, rings): for i, ni in self._get_ring_neighbors(ring): self.rwmol.GetBondBetweenAtoms(i, ni).SetBondType(Chem.BondType.AROMATIC) # downgrade to single if non-ring aromatic for i, atom in enumerate(self.rwmol.GetAtoms()): if any([i in r for r in rings]): continue # ring else: # non-ring for bond in atom.GetBonds(): if bond.GetBondType().name == 'AROMATIC': self.log.debug(f'downgrading bond {i}') bond.SetBondType(Chem.BondType.SINGLE) # aromatics for ring in sorted(rings, key=self._is_aromatic_ring): if self._is_aromatic_ring(ring, rings): # the nonaromatic rings will be done first. for i in ring: self.rwmol.GetAtomWithIdx(i).SetIsAromatic(True) else: for i in ring: self.rwmol.GetAtomWithIdx(i).SetIsAromatic(False) self.modifications.append(self.mol) # may not have changed. def fix_issues(self, _previous=None) -> None: """ This get run during instantiation. It is the third and final one run before sanitization. Deals with a variety of problems. It calls itself until no problems according to `Chem.DetectChemistryProblems` exits. It is a bit shoddy and any oddity likely steps from here. TODO :return: None """ self.modifications.append(self.mol) # may not have changed. 
problems = Chem.DetectChemistryProblems(self.rwmol) if self._iterations_done > 100: self.log.error(f'Iterations maxed out!') return None elif self._subiterations_done > 5: self.log.error(f'Unfixable') return None elif len(problems) == 0: return None else: self.log.debug(f'(Iteration: {self._iterations_done}) N problems {len(problems)}') p = problems[0] self.log.debug(f'(Iteration: {self._iterations_done}) Issue {p.GetType()}: {p.Message()}') if p.Message() == _previous: self.triage_rings() ############################################################ if p.GetType() == 'KekulizeException': if p.Message() != _previous: N = self._get_nitrogens(p.GetAtomIndices()) if len(N) > 0 and self._nitrogen_protonate(N, p.Message()): pass # been fixed. else: # triage rings should have altered any not ring atoms that are aromatic. # self._get_ring_info() # so it is likely a hetatom thing. self.log.info(f'Ring triages seems to have failed. Is it a valence thing?') valence_issues = [self._has_correct_valence(i) for i in p.GetAtomIndices()] if not all(valence_issues): for i in p.GetAtomIndices(): self.fix_valence(i) else: self.log.warning(f'Attempting default valency (not max)') self._valence_mode = 'default' for i in p.GetAtomIndices(): self.fix_valence(i) self._valence_mode = 'max' else: for i in p.GetAtomIndices(): self.downgrade_ring(self.rwmol.GetAtomWithIdx(i)) self.triage_rings() ############################################################ elif p.GetType() == 'AtomKekulizeException' and 'non-ring atom' in p.Message(): atom = self.rwmol.GetAtomWithIdx(p.GetAtomIdx()) atom.SetIsAromatic(False) self.log.debug(f'Atom {p.GetAtomIdx()} set to non-aromatic.') for bond in atom.GetBonds(): bond.SetBondType(Chem.BondType.SINGLE) elif p.GetType() == 'AtomKekulizeException' and 'Aromatic bonds on non aromatic atom' in p.Message(): atom = self.rwmol.GetAtomWithIdx(p.GetAtomIdx()) self.log.debug(f'Atom {p.GetAtomIdx()} set to aromatic.') atom.SetIsAromatic(True) ############################################################ elif p.GetType() == 'AtomValenceException': i = p.GetAtomIdx() self.fix_valence(i) else: self.log.error('???', p.GetType(), p.Message()) self._iterations_done += 1 if _previous != p.Message(): self.log.debug(f'{self._iterations_done} appears successful.') self._subiterations_done = 0 else: self._subiterations_done += 1 self.log.debug(f'{self._iterations_done} appears unsuccessful.') return self.fix_issues(_previous=p.Message()) # ========= Methods that circumvent the nonsanitization ============================================================ def _get_valence_difference(self, atom: Chem.Atom) -> int: pt = Chem.GetPeriodicTable() valence = self._get_atom_valence(atom) if self._valence_mode == 'max': maxv = max(pt.GetValenceList(atom.GetAtomicNum())) return valence - maxv else: d = pt.GetDefaultValence(atom.GetAtomicNum()) return valence - d def _get_atom_valence(self, atom: Chem.Atom): """ Cannot get the normal way as it cannot be sanitised. 
:param atom: :return: """ valence = 0 for bond in atom.GetBonds(): valence += bond.GetBondTypeAsDouble() return valence - atom.GetFormalCharge() def _has_correct_valence(self, atom: Union[Chem.Atom, int]): if isinstance(atom, Chem.Atom): return self._get_valence_difference(atom) <= 0 elif isinstance(atom, int): atom = self.rwmol.GetAtomWithIdx(atom) return self._get_valence_difference(atom) <= 0 # ========= rings ================================================================================================== def _get_atoms_at_fusion(self, ring, rings): fused = [] if rings is not None: for other in rings: if other == ring: pass elif not set(ring).isdisjoint(other): fused.extend(set(ring).intersection(other)) return fused def _is_aromatic_ring(self, ring: Tuple[int], rings=None) -> bool: """ :param ring: GetRingInfo().AtomRings() entry :return: """ fused = self._get_atoms_at_fusion(ring, rings) for i in list(set(ring).difference(fused)): atom_i = self.rwmol.GetAtomWithIdx(i) for n in atom_i.GetNeighbors(): ni = n.GetIdx() if ni in ring: if self.rwmol.GetBondBetweenAtoms(i, ni).GetBondType().name == 'AROMATIC': return True else: return False def _get_ring_neighbors(self, ring: Tuple[int]) -> List[Tuple[int, int]]: """ :param ring: GetRingInfo().AtomRings() entry :return: list of pairs of indices that are neighbors in the ring """ rns = [] for i in ring: atom = self.rwmol.GetAtomWithIdx(i) for n in atom.GetNeighbors(): ni = n.GetIdx() if ni in ring: rns.append((i, ni)) return rns def _get_aroma(self, atom, this_bond): # determine if the bond of the atom is aromatic return [b for b in atom.GetBonds() if b.GetIdx() != this_bond and b.GetBondType().name == 'AROMATIC'] def _get_other(self, bond, these_atoms): others = [a for a in (bond.GetBeginAtom(), bond.GetEndAtom()) if a.GetIdx() not in these_atoms] if others: other = others[0] other.SetIsAromatic(False) return other def downgrade_ring(self, atom: Chem.Atom): ## very crappy way of doing this self.log.debug(f'downgrading whole ring!') atom.SetIsAromatic(False) ringinfo = self._get_ring_info(mode='atom') get_atomrings = lambda ai: [ring for ring in ringinfo if ai in ring] atomrings = get_atomrings(atom.GetIdx()) for atomring in atomrings: rnieghs = self._get_ring_neighbors(atomring) for n1, n2 in rnieghs: self.rwmol.GetAtomWithIdx(n1).SetIsAromatic(False) self.rwmol.GetAtomWithIdx(n2).SetIsAromatic(False) self.rwmol.GetBondBetweenAtoms(n1, n2).SetBondType(Chem.BondType.SINGLE) for atomring in atomrings: rnieghs = self._get_ring_neighbors(atomring) for n1, n2 in rnieghs: if self._get_valence_difference(self.rwmol.GetAtomWithIdx(n1)) <= -2 and \ self._get_valence_difference(self.rwmol.GetAtomWithIdx(n2)) <= -2: self.rwmol.GetBondBetweenAtoms(n1, n2).SetBondType(Chem.BondType.DOUBLE) # if len(self._get_rings(atom.GetIdx())) == 1: # for bond in atom.GetBonds(): # bond.SetBondType(Chem.BondType.SINGLE) # other = self._get_other(bond, [atom.GetIdx()]) # aro = self._get_aroma(other, bond.GetIdx()) # if aro: # aro[0].SetBondType(Chem.BondType.DOUBLE) # doubleother = self._get_other(aro[0], [atom.GetIdx(), other.GetIdx()]) # for b in doubleother.GetBonds(): # if b.GetBondType() == Chem.BondType.AROMATIC: # b.SetBondType(Chem.BondType.SINGLE) # neigh = self._get_other(b, [doubleother.GetIdx()]) # if neigh: # neigh.SetIsAromatic(False) # ========= Sanitization based fixes =============================================================================== def _nitrogen_protonate(self, nitrogens, previous): """ :param nitrogens: list of Nitrogens :param 
previous: :return: """ def reset(): for n in nitrogens: self.rwmol.GetAtomWithIdx(n).SetNumExplicitHs(0) reset() p = Chem.DetectChemistryProblems(self.rwmol) if len(p) == 0 or p[0].Message() != previous: return True for i in range(1, len(nitrogens)): for c in itertools.combinations(nitrogens, i): reset() for n in c: self.rwmol.GetAtomWithIdx(n).SetNumExplicitHs(1) p = Chem.DetectChemistryProblems(self.rwmol) if len(p) == 0 or p[0].Message() != previous: return True return False self.log.debug(f'KekulizeException likely caused by nitrogen') # ========= other helpers ========================================================================================== def _get_nitrogens(self, indices): """ Called when ``KekulizeException`` happends during ``.fix_issues`` :param indices: :return: """ return [i for i in indices if self.rwmol.GetAtomWithIdx(i).GetSymbol() == 'N'] # ========= shift/charge =========================================================================================== def _adjust_for_fix_valence(self, atom): df = self._get_valence_difference(atom) ori = atom.GetSymbol() if self.valence_correction == 'charge': atom.SetFormalCharge(df) elif self.valence_correction == 'element': # ## correct row n = atom.GetAtomicNum() if n == 1: atom.SetAtomicNum(8) elif n > 10: n = (n % 8) - 2 + 8 atom.SetAtomicNum(n) # ## correct column if len(atom.GetNeighbors()) > 4: self._break_bonds(atom) # elif len(atom.GetNeighbors()) > 4 and n <= 16: # S... # atom.SetAtomicNum(16) elif n - df < 6: # C -> B no! for bond in atom.GetBonds(): bond.SetBondType(Chem.BondType.SINGLE) else: # N, O, F etc. atom.SetAtomicNum(int(n - df)) self.log.info(f'Shifting atom from {ori} to {atom.GetSymbol()}') else: raise ValueError(f'self.valence_correction can only be "element"/"charge" not {self.valence_correction}.') def _break_bonds(self, atom): """ Extreme last ditch. Breaks off all non-ring bonds to atom. Will likely trigger emergency_joining. :param atom: :return: """ self.log.warning(f'In molecule ({self.name}) reaking bond to atom {atom.GetIdx()}') ring_indices = [a for ring in self._get_ring_info() for a in ring] for neigh in atom.GetNeighbors(): if neigh.GetIdx() in ring_indices or neigh.GetSymbol() == '*': continue else: self.rwmol.RemoveBond(atom.GetIdx(), neigh.GetIdx()) def fix_valence(self, i): atom = self.rwmol.GetAtomWithIdx(i) atom.SetFormalCharge(0) atom.SetNumExplicitHs(0) self.log.debug(f'{i} {atom.GetSymbol()}: {len(atom.GetNeighbors())} bonds {self._get_atom_valence(atom)}') if self._has_correct_valence(atom): self.log.debug('\tValence seems correct') return None elif atom.GetSymbol() == 'C' and len(atom.GetNeighbors()) > 4: self.log.debug('\ttexas carbon --> S') atom.SetAtomicNum(16) elif atom.GetSymbol() == 'C' and atom.GetIsAromatic() and len(atom.GetNeighbors()) == 4: self.log.debug('\tDowngrading ring') self.downgrade_ring(atom) elif atom.GetSymbol() == 'C': for bond in atom.GetBonds(): bond.SetBondType(Chem.BondType.SINGLE) else: self._adjust_for_fix_valence(atom) # did it work? if self._has_correct_valence(atom): return self.rwmol else: return self.fix_valence(i)
# Copyright (c) 2017, CNRS-LAAS # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from collections import namedtuple from functools import reduce import typing as ty import gdal import numpy as np from typing import List, Tuple, Union from osgeo import osr import matplotlib import matplotlib.pyplot as plt from fire_rs.deprecation import deprecated # A georeferenced point Point = namedtuple('Point', 'x, y') # Indexes of a cell in a raster Cell = namedtuple('Cell', 'x, y') # Georeferenced point with timestamp TimedPoint = namedtuple('TimedPoint', 'x, y, time') Area = namedtuple('Area', 'xmin, xmax, ymin, ymax') EPSG_RGF93_LAMBERT93 = 2154 # Lambert93 projected coordinate system (France) EPSG_RGF93 = 4171 # Lambert93 Geodetic coordinate system EPSG_ETRS89_LAEA = 3035 # Lambert Azimuthal Equal-Area projection (Europe) EPSG_ETRS89 = 4258 # European Terrestrial Reference System (Europe) EPSG_WGS84_UTM29N = 32629 # Lambert Azimuthal Equal-Area projection (Europe) EPSG_WGS84 = 4326 # European Terrestrial Reference System (Europe) class GeoData: """Container for geo-referenced raster data stored in a structured numpy array.""" def __init__(self, array, x_offset, y_offset, cell_width, cell_height, projection: Union[int, str, osr.SpatialReference] = EPSG_RGF93_LAMBERT93): """Create a GeoData projection can be an EPSG code (int), WKT (str) or osr.SpatialReference object""" assert cell_width > 0 and cell_height > 0, 'Origin must be on left-bottom' self.data = array # type: 'np.array' self.x_offset = x_offset self.y_offset = y_offset self.cell_width = cell_width self.cell_height = cell_height if isinstance(projection, int): self._projection_epsg = projection proj = osr.SpatialReference() proj.ImportFromEPSG(projection) # default is EPSG:2154, the RGF93/Lambert-93 projection self._projection = proj elif isinstance(projection, osr.SpatialReference): self._projection = projection self._projection.AutoIdentifyEPSG() self._projection_epsg = int(self._projection.GetAuthorityCode(None)) else: # str proj = osr.SpatialReference() proj.ImportFromWkt(projection) self._projection = proj self._projection.AutoIdentifyEPSG() self._projection_epsg = int(self._projection.GetAuthorityCode(None)) self.max_x = array.shape[0] self.max_y = array.shape[1] def as_cpp_raster(self, layer_name=None): import 
fire_rs.uav_planning as up assert self.cell_width == self.cell_height if layer_name is None: assert len(self.layers) == 1 return up.DRaster(self.data, self.x_offset, self.y_offset, self.cell_height) assert layer_name in self.data.dtype.names return up.DRaster(self.data[layer_name], self.x_offset, self.y_offset, self.cell_height) @staticmethod def from_cpp_raster(raster, layer_name, projection=EPSG_RGF93_LAMBERT93): ar = np.array(raster.as_numpy(), dtype=[(layer_name, 'float64')]) gd = GeoData(ar, raster.x_offset, raster.y_offset, raster.cell_width, raster.cell_width, projection=projection) return gd @staticmethod def from_cpp_long_raster(lraster, layer_name, projection=EPSG_RGF93_LAMBERT93): ar = np.array(lraster.as_numpy(), dtype=[(layer_name, 'int64')]) gd = GeoData(ar, lraster.x_offset, lraster.y_offset, lraster.cell_width, lraster.cell_width, projection=projection) return gd @classmethod def zeros_like(cls, other: 'GeoData'): return cls(np.zeros_like(other.data), other.x_offset, other.y_offset, other.cell_width, other.cell_height, projection=other.projection) @classmethod def full_like(cls, other: 'GeoData', fill_value): return cls(np.full_like(other.data, fill_value), other.x_offset, other.y_offset, other.cell_width, other.cell_height, projection=other.projection) def __contains__(self, coordinates): (x, y) = coordinates x_lim_low = self.x_offset - self.cell_width / 2 x_lim_up = self.x_offset + (self.data.shape[0] + .5) * self.cell_width y_lim_low = self.y_offset - self.cell_height / 2 y_lim_up = self.y_offset + (self.data.shape[1] + .5) * self.cell_height return x_lim_low <= x <= x_lim_up and y_lim_low <= y <= y_lim_up def __repr__(self): return self.data.__repr__() def __getitem__(self, item): return self.data[item] @property def projection(self): return self._projection @property def projection_epsg(self): return self._projection_epsg @property def layers(self): return self.data.dtype.names @property def data_display(self): """Data array in display form.""" return self.data.T[::-1, ...] def slice(self, layers: 'Union[List, str]') -> 'GeoData': """Builds a new GeoData with a subset of the layers""" assert len(layers) >= 1 if isinstance(layers, str): layers = [layers] if len(layers) == 1: layer = layers[0] t = self.data.dtype.fields[layer][0] # type of layer return self.clone(data_array=self.data[layer], dtype=[(layer, t)]) else: unary_slices = [self.slice([l]) for l in layers] return reduce(lambda x, y: x.combine(y), unary_slices[1:], unary_slices[0]) def subset(self, area: Area) -> 'GeoData': # FIXME: This function does not crop correctly some geodata. 
# 'area' seems not to be taken as an extent (xi_min, yi_min) = self.array_index(Point(area.xmin, area.ymin)) (xi_max, yi_max) = self.array_index(Point(area.xmax, area.ymax)) ary = self.data[xi_min:xi_max + 1, yi_min:yi_max + 1] return GeoData(ary, *self.coordinates(Cell(xi_min, yi_min)), self.cell_width, self.cell_height, projection=self.projection) def array_index(self, coordinates: Point) -> Union[Cell, Tuple[int, int]]: (x, y) = coordinates xi = int(round((x - self.x_offset) / self.cell_width)) yi = int(round((y - self.y_offset) / self.cell_height)) return Cell(xi, yi) def coordinates(self, indices: Cell) -> Point: (xi, yi) = indices x = self.x_offset + xi * self.cell_width y = self.y_offset + yi * self.cell_height return Point(x, y) def append_right(self, other: 'GeoData') -> 'GeoData': assert self.data.dtype == other.data.dtype assert self.cell_width == other.cell_width and self.cell_height == other.cell_height assert self.y_offset == other.y_offset assert self.x_offset + self.data.shape[0] * self.cell_width == other.x_offset assert self.data.shape[1] == other.data.shape[1] combined_array = np.concatenate([self.data, other.data], axis=0) return GeoData(combined_array, self.x_offset, self.y_offset, self.cell_width, self.cell_height, projection=self.projection) def append_bottom(self, other: 'GeoData') -> 'GeoData': assert self.data.dtype == other.data.dtype assert self.cell_width == other.cell_width and self.cell_height == other.cell_height assert self.x_offset == other.x_offset assert self.y_offset + self.data.shape[1] * self.cell_height == other.y_offset assert self.data.shape[0] == other.data.shape[0] combined_array = np.concatenate([self.data, other.data], axis=1) return GeoData(combined_array, self.x_offset, self.y_offset, self.cell_width, self.cell_height, projection=self.projection) def combine(self, other: 'GeoData') -> 'GeoData': assert self.data.shape == other.data.shape assert self.cell_width == other.cell_width and self.cell_height == other.cell_height combined_array = join_structured_arrays([self.data, other.data]) return GeoData(combined_array, self.x_offset, self.y_offset, self.cell_width, self.cell_height, projection=self.projection) def split(self, x_splits: int, y_splits: int) -> List['GeoData']: if x_splits == 1: splet = np.split(self.data, y_splits, axis=1) curr_y_offset = self.y_offset res = [] for ary in splet: res.append(GeoData(ary, self.x_offset, curr_y_offset, self.cell_width, self.cell_height, projection=self.projection)) curr_y_offset += res[-1].data.shape[1] * self.cell_height assert len(res) == x_splits * y_splits return res else: splet_on_y = self.split(x_splits=1, y_splits=y_splits) res = [] for gd in splet_on_y: splet_on_x_y = np.split(gd.data, x_splits, axis=0) curr_x_offset = gd.x_offset for ary in splet_on_x_y: res.append(GeoData(ary, curr_x_offset, gd.y_offset, self.cell_width, self.cell_height, projection=self.projection)) curr_x_offset += res[-1].data.shape[0] * self.cell_width assert len(res) == x_splits * y_splits return res def clone(self, data_array=None, fill_value=None, dtype=None): """Returns a clone of this GeoData. If data_array != None, the array is used as the internal data structure. If dtype and fill_value are not None, a new array of the same shape and the given dtype is created and filled with fill_value. Otherwise, a copy of self.data is used. 
""" if data_array is None and fill_value is None: assert dtype is None return GeoData(self.data.copy(), self.x_offset, self.y_offset, self.cell_width, self.cell_height, projection=self.projection) elif data_array is not None: assert fill_value is None # impose data-type if provided data_array = data_array if dtype is None else np.array(data_array, dtype=dtype) assert data_array.shape == self.data.shape, 'The passed array as a different shape' return GeoData(data_array, self.x_offset, self.y_offset, self.cell_width, self.cell_height, projection=self.projection) else: assert fill_value is not None and dtype is not None d = np.full(self.data.shape, fill_value, dtype=dtype) return GeoData(d, self.x_offset, self.y_offset, self.cell_width, self.cell_height, projection=self.projection) def _get_plot_data(self, downscale): # axes with labels ax = plt.figure().gca(aspect='equal', xlabel="X position [m]", ylabel="Y position [m]") ax_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False) ax.yaxis.set_major_formatter(ax_formatter) ax.xaxis.set_major_formatter(ax_formatter) # inverted and downscaled raster data = self.data.T[::-1, ...][::downscale, ::downscale] # downscaled and geo-transformed ticks x = (np.arange(data.shape[0]) * self.cell_width + self.x_offset) y = (np.arange(data.shape[1]) * self.cell_height + self.y_offset) # image geographic bounds image_scale = (x[0], x[-1], y[0], y[-1]) return ax, data, x, y, image_scale @deprecated("Use new API fire_rs.geodata.display.GeoDataDisplay") def plot_vector(self, dir_layer, length_layer, downscale=1, blocking=False): ax, data, x, y, image_scale = self._get_plot_data(downscale) vel = data[length_layer] ang = data[dir_layer] wx = vel * np.cos(ang) wy = vel * np.sin(ang) ax.quiver(*np.meshgrid(x, y), wx, wy, pivot='middle', color='dimgrey') plt.show(block=blocking) @deprecated("Use new API fire_rs.geodata.display.GeoDataDisplay") def plot(self, layer=None, downscale=1, blocking=False, show=True, **kwargs): ax, data, x, y, image_scale = self._get_plot_data(downscale) if layer is None: assert len(self.data.dtype) == 1 layer = self.data.dtype.names[0] z = data[layer] ax.imshow(z, extent=image_scale, vmin=z.min(), vmax=z.max(), cmap=kwargs.get('cmap', matplotlib.cm.terrain), interpolation='none') if show: plt.show(block=blocking) return ax def write_to_separate_files(self, parameterized_filename: str): """Writes each layer to a distinct GeoTiff file. The filename should contain %s which will replaced by name of each layer.""" assert '%s' in parameterized_filename, 'File name should contain %s which will be replaced by the layer name' for layer in self.data.dtype.names: file = parameterized_filename % layer self.write_to_file(file, layer) def write_to_file(self, filename: str, layer_name: ty.Optional[str] = None, nodata: ty.Optional[float] = None): """Writes to GeoTiff file. If a layer_name is provided, then only the corresponding layer will be written, otherwise the GeoTiff file will contain all layers.""" layers = self.data.dtype.names if layer_name is None else [layer_name] if self.cell_height < 0: data = self.data.transpose() # in "image" files, rows and columns are inverted cell_height = self.cell_height origin_y = self.y_offset - self.cell_height / 2 # + array.shape[1] * pixelHeight else: # workaround a wind ninja bug that does not work with non-negative cell height # hence, we invert our matrix on the y axis (x axis after transpose) # to have a negative cell_height data = self.data.transpose()[::-1, ...] 
cell_height = - self.cell_height origin_y = self.y_offset + self.data.shape[1] * self.cell_height - self.cell_height / 2 cols = data.shape[1] rows = data.shape[0] origin_x = self.x_offset - self.cell_width / 2 driver = gdal.GetDriverByName('GTiff') out_raster = driver.Create(filename, cols, rows, len(layers), gdal.GDT_Float64) out_raster.SetGeoTransform((origin_x, self.cell_width, 0, origin_y, 0, cell_height)) # out_raster.SetMetadata({'COMPRESSION': 'LZW', 'INTERLEAVE': 'BAND'}, 'IMAGE_STRUCTURE') for i, layer in enumerate(layers): outband = out_raster.GetRasterBand(i + 1) if nodata is not None: outband.SetNoDataValue(nodata) outband.WriteArray(data[layer]) outband.SetDescription( layer) # apparently not visible in QGIS, maybe there is a better alternative outband.FlushCache() out_raster.SetProjection(self.projection.ExportToWkt()) def write_to_image_file(self, filename: str, layer_name): """Writes to PNG file.""" layers = self.data.dtype.names if layer_name is None else [layer_name] if self.cell_height < 0: data = self.data.transpose() # in "image" files, rows and columns are inverted cell_height = self.cell_height origin_y = self.y_offset - self.cell_height / 2 # + array.shape[1] * pixelHeight else: # workaround a wind ninja bug that does not work with non-negative cell height # hence, we invert our matrix on the y axis to have a negative cell_height data = self.data.transpose()[::-1, ...] cell_height = - self.cell_height origin_y = self.y_offset + self.data.shape[1] * self.cell_height - self.cell_height / 2 cols = data.shape[1] rows = data.shape[0] origin_x = self.x_offset - self.cell_width / 2 driver_mem = gdal.GetDriverByName('MEM') mem_raster = driver_mem.Create(filename, cols, rows, len(layers), gdal.GDT_UInt16) mem_raster.SetGeoTransform((origin_x, self.cell_width, 0, origin_y, 0, cell_height)) for i, layer in enumerate(layers): outband = mem_raster.GetRasterBand(i + 1) outband.WriteArray(data[layer]) outband.SetDescription( layer) # apparently not visible in QGIS, maybe there is a better alternative outband.FlushCache() mem_raster.SetProjection(self.projection.ExportToWkt()) driver_png = gdal.GetDriverByName('PNG') driver_png.CreateCopy(filename, mem_raster, True, ["WORLDFILE=YES"]) @classmethod def load_from_file(cls, filename: str): handle = gdal.Open(filename) proj_wkt = handle.GetProjection() proj = osr.SpatialReference(wkt=proj_wkt) geotransform = handle.GetGeoTransform() x_delta = geotransform[1] y_delta = geotransform[5] x_orig = geotransform[0] + x_delta / 2 y_orig = geotransform[3] + y_delta / 2 layers = [] for i in range(handle.RasterCount): raster_band = handle.GetRasterBand(i + 1) # Band indices start at 1 band_name = raster_band.GetDescription() if not band_name: band_name = "band {}".format(i + 1) layer = np.array(raster_band.ReadAsArray(), dtype=[(band_name, np.float64)]) layers.append(layer) array = join_structured_arrays(layers) if y_delta > 0: array = array.transpose() # in "image" files, rows and columns are inverted else: # workaround a wind ninja bug that does not work with non-negative cell height # hence, we invert our matrix on the y axis to have a negative cell_height y_orig = geotransform[3] + handle.RasterYSize * y_delta - y_delta / 2 y_delta = -y_delta array = array[..., ::-1].transpose() return cls(array, x_orig, y_orig, x_delta, y_delta, projection=proj) class CoordinateTransformation: def __init__(self, from_epsg: int, to_epsg: int): self._s_srs = osr.SpatialReference() self._s_srs.ImportFromEPSG(from_epsg) self._t_srs = osr.SpatialReference() 
self._t_srs.ImportFromEPSG(to_epsg) self._transform = osr.CreateCoordinateTransformation(self._s_srs, self._t_srs) def transform(self, x: float, y: float) -> Tuple[float, float]: return self._transform.TransformPoint(x, y) def join_structured_arrays(arrays): """Efficient method to combine several structured arrays into a single one. This is based on the implementation of http://stackoverflow.com/questions/5355744/numpy-joining-structured-arrays with modifications to account for n-dimensional arrays. It is equivalent (but much faster) to the pure numpy function: rfn.merge_arrays(arrays, flatten=False, usemask=False).reshape(arrays[0].shape) """ assert len(arrays) > 0 assert all([array.shape == arrays[0].shape for array in arrays]), "Arrays have different shapes" sizes = np.array([a.itemsize for a in arrays]) offsets = np.r_[0, sizes.cumsum()] n = arrays[0].size # total number of cell joint = np.empty((n, offsets[-1]), dtype=np.uint8) # copy each array into joint at the proper offsets for a, size, offset in zip(arrays, sizes, offsets): # reshape as a C-contiguous array of bytes tmp = np.ascontiguousarray(a).view(np.uint8).reshape(n, size) joint[:, offset:offset + size] = tmp dtype = sum((a.dtype.descr for a in arrays), []) return joint.ravel().view(dtype).reshape(arrays[0].shape)
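# A minimal usage sketch for join_structured_arrays; the field names and
# shapes below are made up. Two single-field structured arrays of the same
# shape are merged into one two-field structured array.
if __name__ == '__main__':
    elevation = np.zeros((3, 2), dtype=[('elevation', np.float64)])
    slope = np.ones((3, 2), dtype=[('slope', np.float64)])
    combined = join_structured_arrays([elevation, slope])
    assert combined.shape == (3, 2)
    assert combined.dtype.names == ('elevation', 'slope')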
""" Write a Python program to find those numbers which are divisible by 7 and multiple of 5, between 1500 and 2700 (both included). """ for num in range(1500, 2701): if num % 5 == 0 and num % 7 == 0: print(num)
# The MIT License (MIT)
#
# Copyright (c) 2016 Litrin Jiang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from abc import ABCMeta
from functools import reduce  # needed on Python 3, where reduce is no longer a builtin

__all__ = [
    'Normal',
    'NormalS',
    'MA',
    'EMA',
]


class BaseAverage:
    __metaclass__ = ABCMeta

    count = 0
    value = 0.0

    def add_sample(self, sample):
        self.count += 1
        if self.count == 1:
            self.value = sample
        # raise ImportError("Method haven't implemented!")

    def add(self, sample):
        return self.add_sample(sample)

    def __eq__(self, other):
        return self.value == other.value

    def __gt__(self, other):
        return self.value > other.value

    def __lt__(self, other):
        return self.value < other.value

    def __float__(self):
        return self.value

    def __len__(self):
        return self.count

    def __add__(self, other):
        return self.value + other.value

    def __sub__(self, other):
        return self.value - other.value


class Normal(BaseAverage):
    def __init__(self):
        # instance-level list: a class-level list would be shared by every
        # Normal instance and accumulate samples across objects
        self.sample_list = []

    def add_sample(self, sample):
        self.count += 1
        self.sample_list.append(sample)
        self.value = reduce(lambda a, b: a + b, self.sample_list) / float(
            self.count)
        return self.value


class NormalS(BaseAverage):
    value = 0.0

    def add_sample(self, sample):
        self.value = (self.value * self.count + sample) / (self.count + 1)
        self.count += 1
        return self.value


class MA(BaseAverage):
    def add_sample(self, sample):
        BaseAverage.add_sample(self, sample)
        self.value = (self.value + sample) / 2.0
        return self.value


class EMA(BaseAverage):
    window_size = None
    alpha = 1

    def __init__(self, window_size):
        self.window_size = window_size
        self.alpha = 2.0 / (window_size + 1)

    def add_sample(self, sample):
        BaseAverage.add_sample(self, sample)
        self.value += (sample - self.value) * self.alpha
        return self.value
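# A minimal usage sketch (not part of the module above): feed the same made-up
# sample stream to each estimator and compare the resulting values.
if __name__ == '__main__':
    stream = [10.0, 11.0, 13.0, 12.0, 15.0]
    normal, ma, ema = Normal(), MA(), EMA(window_size=3)
    for s in stream:
        normal.add(s)
        ma.add(s)
        ema.add(s)
    # Normal is the plain arithmetic mean; MA halves the distance to each new
    # sample; EMA moves by alpha = 2 / (window_size + 1) toward each sample.
    print(float(normal), float(ma), float(ema))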
from django.db import models from django.utils.translation import ugettext_lazy as _ from model_utils.fields import AutoCreatedField, AutoLastModifiedField from . import JobStatus class IndexedTimeStampedModel(models.Model): created_at = AutoCreatedField(_("created"), db_index=True) updated_at = AutoLastModifiedField(_("modified"), db_index=True) class Meta: abstract = True class Job(models.Model): status = models.CharField(max_length=50, choices=JobStatus.CHOICES, default=JobStatus.PENDING) message = models.CharField(max_length=255, blank=True, null=True) created_at = AutoCreatedField(_("created"), db_index=True) updated_at = AutoLastModifiedField(_("modified"), db_index=True) class Meta: abstract = True
import unittest from minmax_heap import MinHeap, MaxHeap class Node(object): def __init__(self, name: str, distance: int): self.name = name self.distance = distance def __repr__(self): return f"({self.name},{self.distance})" def __lt__(self, other): return self.distance < other.distance class TestHeap(unittest.TestCase): def test_min_heap(self): xs = [10, 3, 5, 2, 19, 13, 1] min_heap = MinHeap(xs) sorted_xs = sorted(xs) for x in sorted_xs: self.assertEqual(x, min_heap.poll()) def test_max_heap(self): xs = [10, 3, 5, 2, 19, 13, 1] max_heap = MaxHeap(xs) sorted_xs = sorted(xs) for x in sorted_xs[::-1]: self.assertEqual(x, max_heap.poll()) def test_decrease_key(self): nodes = [Node("R", -1), Node("B", 6), Node("A", 3), Node("X", 1), Node("E", 4)] min_heap = MinHeap(nodes) min_heap.decrease_key("B", -17) root = min_heap.peek() self.assertEqual(root.name, "B") self.assertEqual(root.distance, -17) if __name__ == "__main__": unittest.main()
import numpy as np from imageio import formats from imageio.core import Format from . import read, write EXTENSIONS = '.mha', '.mhd' class MetaImageIOFormat(Format): def _can_read(self, request): return request.mode[1] in self.modes and request.extension in EXTENSIONS def _can_write(self, request): return request.mode[1] in self.modes and request.extension in EXTENSIONS class Reader(Format.Reader): def _open(self, **kwargs): self._filepath = self.request.get_local_filename() def _close(self): pass def _get_length(self): return np.inf def _get_data(self, index, **kwargs): if index != 0: raise NotImplementedError('MetaImageIO does not support non-zero indices') image, meta = read.read(self._filepath, **self.request.kwargs) if image is None: image = np.array(()) return image, meta def _get_meta_data(self, index): if index != 0: raise NotImplementedError('MetaImageIO does not support non-zero indices') _, meta = read.read(self._filepath, slices=()) return meta class Writer(Format.Writer): def _open(self, **kwargs): self._filepath = self.request.get_local_filename() def _close(self): pass def _append_data(self, im, meta): meta.pop('ElementDataFile', None) meta.update(self.request.kwargs) write.write(self._filepath, image=im, **meta) def set_meta_data(self, meta): raise NotImplementedError('MetaImageIO does not support writing meta data') def imageio(name='MetaImageIO'): if name.upper() not in formats.get_format_names(): names = formats.get_format_names() formats.add_format(MetaImageIOFormat( name, 'MetaImageIO', ' '.join(EXTENSIONS), 'iv')) formats.sort(name, *names) return name
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/6/22 16:13
Desc: Eastmoney - economic data - interbank lending rates
http://data.eastmoney.com/shibor/shibor.aspx?m=sg&t=88&d=99333&cu=sgd&type=009065&p=79
Attention: fetching a large amount of data easily gets the IP banned; retry after 20 minutes,
switch the WIFI to a mobile hotspot, or modify this function to fetch incremental updates only
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

from akshare.interest_rate.cons import market_symbol_indicator_dict, headers


class IPError(Exception):
    """
    Define IPError
    """
    pass


def _get_page_num(
    market: str = "上海银行同业拆借市场", symbol: str = "Shibor人民币", indicator: str = "隔夜"
) -> int:
    """
    Number of result pages for the given market, instrument and indicator
    http://data.eastmoney.com/shibor/shibor.aspx?m=sg&t=88&d=99333&cu=sgd&type=009065&p=79
    :param market: ["上海银行同业拆借市场", "中国银行同业拆借市场", "伦敦银行同业拆借市场", "欧洲银行同业拆借市场", "香港银行同业拆借市场", "新加坡银行同业拆借市场"]
    :type market: str
    :param symbol: ["Shibor人民币", ***]
    :type symbol: str
    :param indicator: str
    :type indicator: ["隔夜", "1周", "2周", ***]
    :return: number of result pages for the given market, instrument and indicator
    :rtype: int
    """
    need_url = market_symbol_indicator_dict[market][symbol][indicator] + "&p=1"
    res = requests.get(need_url, headers=headers)
    soup = BeautifulSoup(res.text, "lxml")
    try:
        page_num = (
            soup.find("div", attrs={"class": "Page"})
            .find_all("a", attrs={"target": "_self"})[-1]["href"]
            .split("=")[-1]
        )
    except AttributeError as e:
        raise IPError("The IP has been banned; retry after 20 minutes or switch the WIFI to a mobile hotspot")
    return int(page_num)


def rate_interbank(
    market: str = "上海银行同业拆借市场",
    symbol: str = "Shibor人民币",
    indicator: str = "隔夜",
    need_page="",
) -> pd.DataFrame:
    """
    Interbank lending rate data for the given market, instrument and indicator
    See http://data.eastmoney.com/shibor/shibor.aspx?m=sg&t=88&d=99333&cu=sgd&type=009065&p=79 for the available market and symbol values
    :param market: choice of {"上海银行同业拆借市场", "中国银行同业拆借市场", "伦敦银行同业拆借市场", "欧洲银行同业拆借市场", "香港银行同业拆借市场", "新加坡银行同业拆借市场"}
    :type market: str
    :param symbol: choice of {"Shibor人民币", ***}
    :type symbol: str
    :param indicator: str
    :type indicator: choice of {"隔夜", "1周", "2周", ***}
    :param need_page: return only the first need_page pages of data; e.g., need_page="5" returns the first 5 pages only; this can be used for incremental updates to avoid getting the IP banned
    :type need_page: str
    :return: interbank lending rate data for the given market, instrument and indicator
    :rtype: pandas.DataFrame
    """
    page_num = _get_page_num(market=market, symbol=symbol, indicator=indicator)
    temp_df = pd.DataFrame()
    if need_page == "":
        for page in tqdm(range(1, page_num + 1)):
            need_url = (
                market_symbol_indicator_dict[market][symbol][indicator] + f"&p={page}"
            )
            res = requests.get(need_url, headers=headers)
            table = pd.read_html(res.text)[0]
            temp_df = temp_df.append(table, ignore_index=True)
        temp_df.columns = [
            "日期",  # date
            "利率",  # rate
            "涨跌",  # change
        ]
        return temp_df
    else:
        for page in tqdm(range(1, int(need_page) + 1)):
            need_url = (
                market_symbol_indicator_dict[market][symbol][indicator] + f"&p={page}"
            )
            res = requests.get(need_url, headers=headers)
            table = pd.read_html(res.text)[0]
            temp_df = temp_df.append(table, ignore_index=True)
        temp_df.columns = [
            "日期",  # date
            "利率",  # rate
            "涨跌",  # change
        ]
        return temp_df


if __name__ == "__main__":
    rate_interbank_shanghai_df = rate_interbank(
        market="上海银行同业拆借市场", symbol="Shibor人民币", indicator="3月", need_page="5"
    )
    print(rate_interbank_shanghai_df)

    rate_interbank_df = rate_interbank(
        market="新加坡银行同业拆借市场", symbol="Sibor星元", indicator="1月", need_page="2"
    )
    print(rate_interbank_df)
""" This problem will show how to use some of pytacs more advanced load setting proceedures. The nominal case is a 1m x 1m flat plate. The perimeter of the plate is fixed in all 6 degrees of freedom. The plate comprises 900 CQUAD4 elements. We consider two structural problems: 1. A static case where a 10 kN point forceis applied at the plate center 2. A transent probelm with a pressure load that varries in time and space given by: P(x,y,t) = Pmax * sin(2*pi*x/L) * sin(2*pi*y/L) * sin(2*pi*fhz*t) where: Pmax = 100 kPa fhz = 1.0 hz L = 1.0 m """ # ============================================================================== # Standard Python modules # ============================================================================== from __future__ import print_function import os # ============================================================================== # External Python modules # ============================================================================== from pprint import pprint import numpy as np from mpi4py import MPI # ============================================================================== # Extension modules # ============================================================================== from tacs import functions, constitutive, elements, pyTACS # Center load magnitude Q = 1e4 # Pressure magnitude Pmax = 100e3 # Pressure frequency fhz = 1.0 # Length of square plate L = 1.0 comm = MPI.COMM_WORLD # Instantiate FEAAssembler structOptions = { 'printtiming':True, } bdfFile = os.path.join(os.path.dirname(__file__), 'plate.bdf') FEAAssembler = pyTACS(bdfFile, options=structOptions) def elemCallBack(dvNum, compID, compDescript, elemDescripts, specialDVs, **kwargs): # Material properties rho = 2500.0 # density kg/m^3 E = 70e9 # Young's modulus (Pa) nu = 0.3 # Poisson's ratio ys = 464.0e6 # yield stress # Plate geometry tplate = 0.005 # 1 mm tMin = 0.0001 # 0.1 mm tMax = 0.05 # 5 cm # Set up property model prop = constitutive.MaterialProperties(rho=rho, E=E, nu=nu, ys=ys) # Set up constitutive model con = constitutive.IsoShellConstitutive(prop, t=tplate, tNum=dvNum, tlb=tMin, tub=tMax) transform = None # Set up element elem = elements.Quad4Shell(transform, con) scale = [100.0] return elem, scale # Set up constitutive objects and elements FEAAssembler.initialize(elemCallBack) # List to hold all problems allProblems = [] # Setup static problem staticProb = FEAAssembler.createStaticProblem(name='point_force') # Add functions staticProb.addFunction('mass', functions.StructuralMass) staticProb.addFunction('ks_vmfailure', functions.KSFailure, ksWeight=100.0) # Add point force to node 481 (center of plate) F = np.array([0.0, 0.0, Q, 0.0, 0.0, 0.0]) staticProb.addLoadToNodes(481, F, nastranOrdering=True) # Add static problem to list allProblems.append(staticProb) # Setup transient problem # turn on print for solver in options transientOptions = {'printlevel':1} transientProb = FEAAssembler.createTransientProblem(name='pressure', tInit=0.0, tFinal=10.0, numSteps=50, options=transientOptions) # Add functions transientProb.addFunction('mass', functions.StructuralMass) transientProb.addFunction('ks_vmfailure', functions.KSFailure, ksWeight=100.0) # Add presure load over plate # pynastran bdf object for parsing mesh info bdfInfo = FEAAssembler.getBDFInfo() # cross-reference bdf object to use some of pynastrans advanced features bdfInfo.cross_reference() # Loop through each element and get the spatial variation of the pressure Pxy = [] eIDs = [] for eID in bdfInfo.elements: [x, y, z] = 
bdfInfo.elements[eID].Centroid() Pmag = Pmax * np.sin(2 * np.pi * x / L) * np.sin(2 * np.pi * y / L) Pxy.append(Pmag) eIDs.append(eID) Pxy = np.array(Pxy) # Loop through each time step and add time varying load timeSteps = transientProb.getTimeSteps() for step_i, time in enumerate(timeSteps): # Multiply by time factor Pxyt = Pxy * np.sin(2 * np.pi * fhz * time) # Add pressure to problem for timestep transientProb.addPressureToElements(step_i, eIDs, Pxyt, nastranOrdering=True) # Add transient problem to list allProblems.append(transientProb) # Solve all problems and evaluate functions funcs = {} funcsSens = {} for problem in allProblems: problem.solve() problem.evalFunctions(funcs) problem.evalFunctionsSens(funcsSens) problem.writeSolution() if comm.rank == 0: pprint(funcs)
from xml.etree import ElementTree from sickle import Sickle from sickle.iterator import OAIResponseIterator, OAIItemIterator from sickle.oaiexceptions import BadArgument, BadResumptionToken, NoSetHierarchy import urllib.parse import logging import random import os class InvalidPrefixError(Exception): pass class OAIHarvester(object): """Downloads files from a OAI-PMH 2.0 API and stores them as xml.""" def __init__(self, base_url: str, metadata_prefix: str, path: str, base_file_name='harvest-result', user='', password='', logger=logging.getLogger('oai'), encoding='iso-8859-1'): """ Configure a basic connection to the OAI-Server. Sets up the sickle instance with appropriate settings and checks if the metadata prefix is valid. Creates a directory at path if no such path exists. :param base_url: Base url for the oai request without http:// :param metadata_prefix: Metadata-Prefix for the api_response to be harvested. :param path: Directory path where the files should be stored. :param base_file_name: Downloads are saved in this file. If several downloads are made the resumption token or a random number is added. :param user: User name for basic http authentication (unescaped) :param password: Password for basic http authentication (unescaped) :param logger: Logger used to log all actions and errors of this class. :param encoding: The encoding used to store elements :raises InvalidPrefixError if the given prefix is not valid. """ self.encoding = encoding self.logger = logger self.use_authentication = False if user != '': assert password != '' self.user = urllib.parse.quote(user) self.encoded_password = urllib.parse.quote(password) self.use_authentication = True self.logger.info('Uses authentication with credentials: user: %s, password: %s.', self.user, self.encoded_password) else: self.logger.info('No authentication given.') self.url = base_url self.path = path self.base_file_name = base_file_name self.metadataPrefix = metadata_prefix self.api_response = None self.data = list() if self.use_authentication: self.sickle = Sickle('https://' + self.user + ':' + self.encoded_password + '@' + self.url, iterator=OAIResponseIterator) else: self.sickle = Sickle('https://' + self.url, iterator=OAIResponseIterator) self._verify_metadata_prefix() if not os.path.exists(self.path): self.logger.info('Create directory at %s.', self.path) os.makedirs(self.path) def _verify_metadata_prefix(self): """ Verifies that the used metadata prefix is valid for this OAI repository. :raises InvalidPrefixError if the given prefix is not valid. """ # changes the sickle iterator to item to easily access metadata prefix. self.sickle.iterator = OAIItemIterator valid_prefix_list = list() metadata = self.sickle.ListMetadataFormats() is_valid_prefix = False while True: try: prefix = metadata.next().metadataPrefix except StopIteration: break valid_prefix_list.append(prefix) if prefix == self.metadataPrefix: is_valid_prefix = True if not is_valid_prefix: self.logger.critical('Given metadata prefix (%s) was not valid. Select one of these: %s', self.metadataPrefix, str(valid_prefix_list)) raise InvalidPrefixError('Invalid metadataPrefix: ' + self.metadataPrefix + '.\n' + ' A list of the available prefixes: ' + str(valid_prefix_list)) else: self.logger.info('The prefix given is valid.') def store_records(self, set_id=None, date=None, ignore_deleted=False): """ Downloads all records found on the OAI-API or all records from a given set. 
:param set_id: determine what set to download if a given set should be downloaded (default None) :type set_id: str :param date: Only records added/changed after this date will be downloaded (default None) :type date: str 'YYYY-MM-DD' :param ignore_deleted: When true ignores all deleted records. This may not be a feature available in all OAI archives. :type ignore_deleted bool """ self.sickle.iterator = OAIResponseIterator params = {'metadataPrefix': self.metadataPrefix, 'from': date, 'set': set_id, 'ignore_deleted': ignore_deleted} self.api_response = self.sickle.ListRecords(**params) self._write_all_records() def store_record(self, identifier: int): """ Downloads a single record with the given id and stores it in a file at the given place. :param identifier: the id which should be retrieved. """ self.sickle.iterator = OAIResponseIterator record = self.sickle.GetRecord(identifier=identifier, metadataPrefix=self.metadataPrefix) temp_xml = record.raw with open(self.path + self.base_file_name + str(identifier) + '.xml', 'w', encoding=self.encoding) as file: file.write(temp_xml) def iterate_sets(self): """Iterate through all sets available at the OAI repository. :return List of all sets as tupels (id, name) :rtype: iterator tuple (str, str) """ self.sickle.iterator = OAIItemIterator try: sets = self.sickle.ListSets() for s in sets: yield (s.setSpec, s.setName) except NoSetHierarchy as error: self.logger.warning(str(error)) raise NoSetHierarchy(error) def _write_all_records(self): """Writes all downloaded api_response into xml files.""" if self.api_response is None: self.logger.critical('No response loaded.') raise Exception('No response loaded.') record = self.api_response.next() last_count = 0 while record: temp_xml = record.raw if isinstance(temp_xml, str): root = ElementTree.fromstring(temp_xml) self.data.append(root) download_count = len(root[2]) - 1 last_count += download_count token = root[2][-1] total = 0 file = None try: file = open(self.path + self.base_file_name + '-' + token.text + '.xml', 'w', encoding=self.encoding) total = int(root[2][-1].get('completeListSize')) self.logger.info('Downloaded %s records from repository. Still %s to go.', download_count, total - last_count) file.write(temp_xml) record = self.api_response.next() except TypeError: # no resumption token found. file = open(self.path + self.base_file_name + '-' + str(random.randrange(100000)) + '.xml', 'w', encoding=self.encoding) self.logger.info('No resumption token found. Stopping Download. ' 'Downloaded %s from this repository.', total) file.write(temp_xml) record = None except (BadArgument, BadResumptionToken) as error: self.logger.critical('Stopped Download: "%s"', str(error)) record = None finally: if file is not None: file.close()
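# A minimal usage sketch; the endpoint, metadata prefix and paths below are
# made up. The harvester checks the prefix against ListMetadataFormats on
# construction and then writes every record added since the given date into
# ./dump/ as XML files.
if __name__ == '__main__':
    harvester = OAIHarvester(base_url='oai.example.org/oai/request',
                             metadata_prefix='oai_dc',
                             path='dump/',
                             base_file_name='harvest')
    harvester.store_records(date='2020-01-01')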
"""test create ddf dataset from csv""" from ddf_utils.cli import from_csv from click.testing import CliRunner import os from tempfile import mkdtemp from ddf_utils.model.package import DDFcsv from ddf_utils.package import create_datapackage def test_from_csv(): input_dir = os.path.join(os.path.dirname(__file__), 'raw_csv') out_dir = mkdtemp() test_runner = CliRunner() test_runner.invoke(from_csv, ['-i', input_dir, '-o', out_dir]) dp = create_datapackage(out_dir) d = DDFcsv.from_dict(dp, base_path=out_dir).ddf assert len(d.indicators()) == 31
import logging

from config import Config

try:
    filename = Config()['default']['logging_dir']
except Exception:
    # fall back to a default location when no logging_dir is configured
    filename = '/tmp/dtt.log'

logging.basicConfig(filename=filename, level=logging.DEBUG)


def logger(name):
    return logging.getLogger(name)
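# A minimal usage sketch; the message is made up. Callers request a named
# logger so records in the shared log file can be traced back to their module.
if __name__ == '__main__':
    logger(__name__).debug('logging configured, writing to %s', filename)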
#!/usr/bin/env python3 # Josh Goodman sp20-516-220 E.Cloudmesh.Common.5 import time from cloudmesh.common.StopWatch import StopWatch if __name__ == "__main__": StopWatch.start('timer1') time.sleep(1) StopWatch.start('timer2') time.sleep(1) StopWatch.stop('timer2') StopWatch.start('timer3') time.sleep(1) StopWatch.stop('timer1') time.sleep(1) StopWatch.stop('timer3') StopWatch.benchmark()
from os import environ from loader import Loader import actions LOADER = Loader() def lambda_handler(event, context): # return event status = LOADER.personalize_cli.describe_dataset_group( datasetGroupArn=event['datasetGroupArn'] )['datasetGroup'] actions.take_action_delete(status['status']) return status['status']
# -*- coding: utf-8 -*- # # Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Resource flags and helpers for the deploy command group.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.calliope.concepts import concepts from googlecloudsdk.calliope.concepts import deps from googlecloudsdk.command_lib.util.concepts import concept_parsers from googlecloudsdk.core import properties def DeliveryPipelineAttributeConfig(): """Creates the delivery pipeline resource attribute.""" return concepts.ResourceParameterAttributeConfig( name='delivery-pipeline', fallthroughs=[ deps.PropertyFallthrough( properties.FromString('deploy/delivery_pipeline')) ], help_text='The delivery pipeline associated with the {resource}. ' ' Alternatively, set the property [deploy/delivery-pipeline].') def AddReleaseResourceArg(parser, help_text=None, positional=False, required=True): """Add --release resource argument to the parser. Args: parser: argparse.ArgumentParser, the parser for the command. help_text: help text for this flag. positional: if it is a positional flag. required: if it is required. """ help_text = help_text or 'The name of the Release.' concept_parsers.ConceptParser.ForResource( 'release' if positional else '--release', GetReleaseResourceSpec(), help_text, required=required, plural=False).AddToParser(parser) def GetReleaseResourceSpec(): """Constructs and returns the Resource specification for Delivery Pipeline.""" return concepts.ResourceSpec( 'clouddeploy.projects.locations.deliveryPipelines.releases', resource_name='release', deliveryPipelinesId=DeliveryPipelineAttributeConfig(), releasesId=ReleaseAttributeConfig(), projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG, locationsId=LocationAttributeConfig(), disable_auto_completers=False) def ReleaseAttributeConfig(): """Creates the release resource attribute.""" return concepts.ResourceParameterAttributeConfig( name='release', help_text='The release associated with the {resource}.') def LocationAttributeConfig(): """Creates the location resource attribute.""" return concepts.ResourceParameterAttributeConfig( name='region', parameter_name='locationsId', fallthroughs=[ deps.PropertyFallthrough(properties.FromString('deploy/region')) ], help_text='The Cloud region for the {resource}. ' ' Alternatively, set the property [deploy/region].') def AddLocationResourceArg(parser): """Adds a resource argument for a cloud deploy region. NOTE: Must be used only if it's the only resource arg in the command. Args: parser: argparse.ArgumentParser, the parser for the command. 
""" concept_parsers.ConceptParser.ForResource( '--region', GetLocationResourceSpec(), 'The Cloud region of {resource}.', required=True).AddToParser(parser) def GetLocationResourceSpec(): """Constructs and returns the Resource specification for location.""" return concepts.ResourceSpec( 'clouddeploy.projects.locations', resource_name='location', locationsId=LocationAttributeConfig(), projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG, ) def TargetAttributeConfig(): """Creates the target resource attribute.""" return concepts.ResourceParameterAttributeConfig( name='target', help_text='The target associated with the {resource}.') def GetTargetResourceSpec(): """Constructs and returns the target specification for Target.""" return concepts.ResourceSpec( 'clouddeploy.projects.locations.targets', resource_name='target', targetsId=TargetAttributeConfig(), projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG, locationsId=LocationAttributeConfig(), disable_auto_completers=False) def AddTargetResourceArg(parser, help_text=None, positional=False, required=True): """Add target resource argument to the parser. Args: parser: argparse.ArgumentParser, the parser for the command. help_text: help text for this flag. positional: if it is a positional flag. required: if it is required. """ help_text = help_text or 'The name of the Target.' concept_parsers.ConceptParser.ForResource( 'target' if positional else '--target', GetTargetResourceSpec(), help_text, required=required, plural=False).AddToParser(parser) def AddDeliveryPipelineResourceArg(parser, help_text=None, positional=False, required=True): """Adds --delivery-pipeline resource argument to the parser. Args: parser: argparse.ArgumentParser, the parser for the command. help_text: help text for this flag. positional: if it is a positional flag. required: if it is required. """ help_text = help_text or 'The name of the Delivery Pipeline.' concept_parsers.ConceptParser.ForResource( 'delivery_pipeline' if positional else '--delivery-pipeline', GetDeliveryPipelineResourceSpec(), help_text, required=required, plural=False).AddToParser(parser) def GetDeliveryPipelineResourceSpec(): """Constructs and returns the Resource specification for Delivery Pipeline.""" return concepts.ResourceSpec( 'clouddeploy.projects.locations.deliveryPipelines', resource_name='delivery_pipeline', deliveryPipelinesId=DeliveryPipelineAttributeConfig(), projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG, locationsId=LocationAttributeConfig(), disable_auto_completers=False) def RolloutAttributeConfig(): """Creates the rollout resource attribute.""" return concepts.ResourceParameterAttributeConfig( name='rollout', help_text='The rollout associated with the {resource}.') def GetRolloutResourceSpec(): """Constructs and returns the resource specification for Rollout.""" return concepts.ResourceSpec( 'clouddeploy.projects.locations.deliveryPipelines.releases.rollouts', resource_name='rollout', deliveryPipelinesId=DeliveryPipelineAttributeConfig(), releasesId=ReleaseAttributeConfig(), rolloutsId=RolloutAttributeConfig(), projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG, locationsId=LocationAttributeConfig(), disable_auto_completers=False) def AddRolloutResourceArg(parser, help_text=None, positional=False, required=True): """Add --rollout resource argument to the parser. Args: parser: argparse.ArgumentParser, the parser for the command. help_text: help text for this flag. positional: if it is a positional flag. required: if it is required. 
""" help_text = help_text or 'The name of the Rollout.' concept_parsers.ConceptParser.ForResource( 'rollout' if positional else '--rollout', GetRolloutResourceSpec(), help_text, required=required, plural=False).AddToParser(parser)
#!/usr/bin/env python import argparse import yaml def read_param() -> dict: """ read parameters from terminal """ parser = argparse.ArgumentParser() parser.add_argument( "--ScreenType", help="type of screen ['enrichment'/'depletion']", type=str, choices=["enrichment", "depletion"] ) parser.add_argument("--LibFilename", help="filename of library spreadsheet", type=str) parser.add_argument("--seq_5_end", help="5'-sequence adapter", type=str) parser.add_argument("--CtrlPrefix", help="Name of control", type=str) parser.add_argument("--NonTargetPrefix", help="prefix for non-targeting sgRNAs in library", type=str) parser.add_argument("--NumGuidesPerGene", help="number of sgRNAs per gene", type=int) args = parser.parse_args() # create a dictionary to store arguments args_dict = dict() for arg in vars(args): args_dict[arg] = getattr(args, arg) return args_dict def yaml_converter(parameters: dict) -> yaml: """ converts given parameters to a yaml file """ # TODO: rename testparse.yaml to configuration.yaml with open("testparse.yaml", "w") as file: documents = yaml.dump(parameters, file) def main(): """ main function """ # create config file yaml_converter(parameters=read_param()) if __name__ == "__main__": main()
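# A minimal sketch of the mapping yaml_converter serialises; all values below
# are made up. yaml.dump writes one "key: value" line per argument, with the
# keys sorted alphabetically by default.
_example_params = {
    "ScreenType": "enrichment",
    "LibFilename": "library.xlsx",
    "seq_5_end": "ACCG",
    "CtrlPrefix": "control",
    "NonTargetPrefix": "nontargeting",
    "NumGuidesPerGene": 6,
}
# yaml_converter(parameters=_example_params) would write a testparse.yaml like:
#   CtrlPrefix: control
#   LibFilename: library.xlsx
#   NonTargetPrefix: nontargeting
#   NumGuidesPerGene: 6
#   ScreenType: enrichment
#   seq_5_end: ACCG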
import unittest from gerel.genome.edge import Edge from gerel.genome.node import Node import itertools from tests.factories import genome_pair_factory from gerel.algorithms.NEAT.metric import generate_neat_metric class TestMetrics(unittest.TestCase): def setUp(self): # reset innovation number Node.innov_iter = itertools.count() Edge.innov_iter = itertools.count() Node.registry = {} Edge.registry = {} def test_compatibility_distance(self): metric = generate_neat_metric() g1, g2 = genome_pair_factory() d = metric(g1, g2) self.assertEqual(round(d, 5), 0.9)
import argparse import bs4 import os import re import requests import signal import sys import unidecode # determine if text is processed or not PROCESSED_TEXT = False # we can't split sentences on abbreviations that end in a '.' ABB = [ 'etc', 'mr', 'mrs', 'ms', 'dr', 'sr', 'jr', 'gen', 'rep', 'sen', 'st', 'al', 'eg', 'ie', 'in', 'phd', 'md', 'ba', 'dds', 'ma', 'mba', 'inc', 'pm', 'am', 'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'sept', 'oct', 'nov', 'dec', 'mon', 'tue', 'wed', 'weds', 'thur', 'thu', 'thurs', 'fri', 'fig' ] # if we come across a word with a '.' that ends in one of these common file # extensions, we should not split on the '.' EXT = [ 'js', 'py', 'txt', 'json', 'doc', 'docx', 'pdf', 'bash', 'sh', 'java', 'jsx', 'html', 'css', 'db', 'md', 'csh', 'zsh', 'xsh', 'cpp', 'swift', 'gpg', 'pickle', 'png', 'jpg', 'jpeg', 'gif', 'tiff', 'lock', 'rb', 'git', 'gitignore', 'ico', 'webmanifest', 'icns', 'xls', 'xlsx', 'ppt', 'pptx', 'asp', 'aspx', 'yaws', 'pl', 'php', 'xml', 'svg', 'heic', 'mov', 'bz2', 'csv', 'cs', 'erl', 'asm', 'awk', 'bat', 'bmp', 'class', 'dll', 'dump', 'exe', 'hpp', 'jar', 'log', 'obj', 'rc', 'ts', 'rs', 'wav', 'zip', 'com', 'nl', 'ms' ] # we should exclude these common words when scoring sentences to get more # accurate sentence scores EXCLUDE = [ 'the', 'of', 'to', 'a', 'and', 'in', 'that', 'he', 'she', 'on', 'as', 'his', 'hers', 'for', 'is', 'by', 'was', 'with', 'at', 'from', 'has', 'its', 'mr', 'mrs', 'ms', 'dr', 'sr', 'jr', 'sen', 'rep', 'st', 'said', 'it', 'be', 'not', 'or', 'but', 'who', 'when', 'your', 'those', 'these', 'you', 'this', 'they', 'we', 'our', 'will', 'are', 'am', 'can', 'an', 'have', 'how', 'my', 'which', 'their', 'theirs', 'what', 'her', 'him', 'had', 'would', 'them', 'like', 'than', 'could', 'did', 'do' ] # if we see a lot of these symbols, the sentence is likely code and should # be ignored CODE = [ '{', '}', '=', '[', ']', '/', '\\', '@', ':', '<', '>', '!', '|', '*', '+', '-', ';', '?', '(', ')', '$', '&', '%', '^', '_', '#', '~', '`' ] # strip away these tags from the HTML to attempt to pull out meaningful text # content in these tags is usually not relevant to text meaning TAGS = [ 'script', 'style', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'nav', 'title', 'svg', 'footer' ] # if a tag has a class with one of these key words, it is usually not # relevant to text meaning CLASS_KEYWORDS = [ 'nav', 'menu', 'copyright' ] # custom exception for when timeouts occur class TimeoutException(Exception): pass # try to pull text out of source def get_text(source): text = None status_code = None global PROCESSED_TEXT # read text from file or URL if os.path.isfile(source): # update timeout settings PROCESSED_TEXT = True status_code = 200 try: with open(source) as f: text = f.read() except: print(f'Could not read from local file \'{source}\'') sys.exit(1) else: try: # get HTML from webpage html = requests.get(source) status_code = html.status_code # convert HTML to meaningful text data with 3 steps # 1. convert HTML to plaintext # 2. remove irrelevant tags # 3. 
remove navigation links soup = bs4.BeautifulSoup(html.content, 'html.parser') for script in soup(TAGS): script.extract() for tag in soup.find_all(): if 'class' in tag.attrs: for class_name in CLASS_KEYWORDS: if class_name in ' '.join(tag.attrs['class']).lower(): tag.extract() text = soup.get_text() # update timeout settings PROCESSED_TEXT = True except TimeoutException: sys.exit(1) except: print(f'Could not fetch data from URL') sys.exit(1) if not status_code == 200: print(f'({status_code}) Could not scrape data, unable to access data programmatically from URL') sys.exit(1) # decode text to ASCII-ish try: text = unidecode.unidecode(text) except: print(f'Could not decode parsed data') sys.exit(1) # replace persnickity characters text = text.replace('\n', ' ').replace(' " ', ' ').replace(" ' ", ' ') if text == '' or text == None or not '.' in text: print(f'Could not parse text, either no sentences found in data or data format could not be processed') sys.exit(1) return text # build resolved sentences from a block of text def get_sentences(text): sentences = [chunk for chunk in re.split('([.?!])', text) if not chunk == '' and not chunk.isspace()] sentences = [x+y for x,y in zip(sentences[0::2], sentences[1::2])] # we'll need to make sure we split up our sentences properly based on ABB and EXT index = 0 while index < len(sentences) - 2: last_word_previous_sentence = ''.join(character for character in sentences[index].split()[-1].lower() if character.isalnum()).lower() first_word_next_sentence = ''.join(character for character in sentences[index+1].split()[0].lower() if character.isalnum()).lower() if ( last_word_previous_sentence in ABB or first_word_next_sentence in EXT or len(last_word_previous_sentence) == 1 or len(first_word_next_sentence) == 1 ) and ( not last_word_previous_sentence in ['a', 'i'] and not first_word_next_sentence in ['a', 'i'] ): sentences[index] = ''.join([sentences[index], sentences[index + 1]]) del sentences[index + 1] else: index += 1 return [sentence.strip() for sentence in sentences] # calculate how frequently each word occurs in the text # we want to ignore the words in EXCLUDE def calculate_word_frequency(sentences): frequencies = {} words = ' '.join(sentences).split() raw_words = [''.join(character for character in word if character.isalnum()).lower() for word in words] for word in raw_words: if word in EXCLUDE: frequencies[word] = 0 elif word in frequencies: frequencies[word] += 1 else: frequencies[word] = 1 if '' in frequencies: del frequencies[''] return frequencies # calculate the score for each sentence depending on its word contents def calculate_sentence_scores(sentences, frequencies): scores = [] for sentence in sentences: score = 0 if sum(map(sentence.count, CODE)) >= 10: scores.append(score) continue for word in sentence.split(): raw_word = ''.join(character for character in word if character.isalnum()).lower() if raw_word == '': continue score += frequencies[raw_word] scores.append(score) # weight first 10% of text 50% heavier, as most news articles are front-loaded sentence_scores = list(zip(sentences, scores)) for index in range(int(len(sentence_scores)*0.1)): sentence_scores[index] = (sentence_scores[index][0], int(sentence_scores[index][1]*1.5)) return sentence_scores # build the summary string based on the most important sentences def build_summary(scores, limit): # build list of sentence indicies sentence_indices = [] for index, score in enumerate(scores): sentence_indices.append((index, score[1])) # sort based on sentence score sorted_sentences = 
sorted(sentence_indices, key=lambda item: item[1])[::-1] # build list of highest ranked sentences summary_sentences = [] for index in range(limit): if index < len(scores) - 1: summary_sentences.append(scores[sorted_sentences[index][0]][0]) # clean up text and convert to string # ~94% of English words are less than 14 characters, so we exclude words # longer than 14 characters to attempt to further clarify the text summary = ' '.join(summary_sentences) summary = ' '.join([word for word in summary.split() if len(word) < 14]) return summary # build the metrics string based on summary properties def build_metrics(text, summary, frequencies, omit, verbose): metrics = '' if omit: return metrics original_length = len(text) smooshed_length = len(summary) smooshed_percentage = (1.0 - (float(smooshed_length) / float(original_length))) * 100 smooshed_percentage = '%.2f' % smooshed_percentage metrics += '-_-_-_-_-_-_ METRICS _-_-_-_-_-_-\n' metrics += f'Original length: {original_length} characters\n' metrics += f'Smooshed length: {smooshed_length} characters\n' metrics += f'Original smooshed by {smooshed_percentage}%' if not verbose: return metrics most_common_words = sorted(frequencies.items(), key=lambda item: item[1])[-5:][::-1] most_common_words = [ word if not word[0] == 'i' else ('I', word[1]) for word in most_common_words ] metrics += '\nMost common words:\n' metrics += f' * {most_common_words[0][0]} ({most_common_words[0][1]} time{"" if most_common_words[0][1] == 1 else "s"})\n' metrics += f' * {most_common_words[1][0]} ({most_common_words[1][1]} time{"" if most_common_words[0][1] == 1 else "s"})\n' metrics += f' * {most_common_words[2][0]} ({most_common_words[2][1]} time{"" if most_common_words[0][1] == 1 else "s"})\n' metrics += f' * {most_common_words[3][0]} ({most_common_words[3][1]} time{"" if most_common_words[0][1] == 1 else "s"})\n' metrics += f' * {most_common_words[4][0]} ({most_common_words[4][1]} time{"" if most_common_words[0][1] == 1 else "s"})' return metrics # print the final results def print_results(summary, metrics): print(summary) if not metrics == '': print('') print(metrics) # build the command line parser def build_parser(): parser = argparse.ArgumentParser(description='Summarize any text article', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('source', type=str, help='the text source, which can either be the path to a local file or a URL to a webpage') parser.add_argument('-n', '--sentence-limit', type=int, default=7, required=False, help='the number of sentences that will be used to describe the text') parser.add_argument('-q', '--quiet', action='store_true', required=False, help='omit metric summary') parser.add_argument('-v', '--verbose', action='store_true', required=False, help='print verbose metric summary') parser.add_argument('-t', '--timeout', type=int, default=10, required=False, help='timeout (in seconds) to use when fetching data from a URL') return parser # raise an exception if timeout occurs def signal_handler(signum, frame): raise TimeoutException('timeout') def main(): # parse args parser = build_parser() args = parser.parse_args() # set up timeout signal.signal(signal.SIGALRM, signal_handler) signal.alarm(args.timeout) try: text = get_text(args.source) sentences = get_sentences(text) frequencies = calculate_word_frequency(sentences) scores = calculate_sentence_scores(sentences, frequencies) summary = build_summary(scores, args.sentence_limit) metrics = build_metrics(text, summary, frequencies, args.quiet, args.verbose) 
print_results(summary, metrics) except: if not PROCESSED_TEXT: print(f'Timeout occurred while trying to fetch data from URL') sys.exit(1) sys.exit(1) if __name__ == '__main__': main()
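# A minimal usage sketch; the script name and sources below are made up, while
# the flags come from the argument parser defined above.
#
#   python smoosh.py https://example.com/article.html -n 5 -v
#   python smoosh.py notes.txt --quiet --timeout 20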
# Copyright 2018 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import sympy import cirq def test_commutes_on_matrices(): I, X, Y, Z = (cirq.unitary(A) for A in (cirq.I, cirq.X, cirq.Y, cirq.Z)) IX, IY = (np.kron(I, A) for A in (X, Y)) XI, YI, ZI = (np.kron(A, I) for A in (X, Y, Z)) XX, YY, ZZ = (np.kron(A, A) for A in (X, Y, Z)) for A in (X, Y, Z): assert cirq.commutes(I, A) assert cirq.commutes(A, A) assert cirq.commutes(I, XX, default='default') == 'default' for A, B in [(X, Y), (X, Z), (Z, Y), (IX, IY), (XI, ZI)]: assert not cirq.commutes(A, B) assert not cirq.commutes(A, B, atol=1) assert cirq.commutes(A, B, atol=2) for A, B in [(XX, YY), (XX, ZZ), (ZZ, YY), (IX, YI), (IX, IX), (ZI, IY)]: assert cirq.commutes(A, B) def test_commutes_on_gates_and_gate_operations(): X, Y, Z = tuple(cirq.unitary(A) for A in (cirq.X, cirq.Y, cirq.Z)) XGate, YGate, ZGate = (cirq.MatrixGate(A) for A in (X, Y, Z)) XXGate, YYGate, ZZGate = ( cirq.MatrixGate(cirq.kron(A, A)) for A in (X, Y, Z)) a, b = cirq.LineQubit.range(2) for A in (XGate, YGate, ZGate): assert cirq.commutes(A, A) assert A._commutes_on_qids_(a, A, atol=1e-8) is NotImplemented with pytest.raises(TypeError): cirq.commutes(A(a), A) with pytest.raises(TypeError): cirq.commutes(A, A(a)) assert cirq.commutes(A(a), A(a)) assert cirq.commutes(A, XXGate, default='default') == 'default' for A, B in [(XGate, YGate), (XGate, ZGate), (ZGate, YGate), (XGate, cirq.Y), (XGate, cirq.Z), (ZGate, cirq.Y)]: assert not cirq.commutes(A, B) assert cirq.commutes(A(a), B(b)) assert not cirq.commutes(A(a), B(a)) with pytest.raises(TypeError): cirq.commutes(A, B(a)) cirq.testing.assert_commutes_magic_method_consistent_with_unitaries( A, B) for A, B in [(XXGate, YYGate), (XXGate, ZZGate)]: assert cirq.commutes(A, B) with pytest.raises(TypeError): cirq.commutes(A(a, b), B) with pytest.raises(TypeError): cirq.commutes(A, B(a, b)) assert cirq.commutes(A(a, b), B(a, b)) assert cirq.definitely_commutes(A(a, b), B(a, b)) cirq.testing.assert_commutes_magic_method_consistent_with_unitaries( A, B) for A, B in [(XGate, XXGate), (XGate, YYGate)]: with pytest.raises(TypeError): cirq.commutes(A, B(a, b)) assert not cirq.definitely_commutes(A, B(a, b)) with pytest.raises(TypeError): assert cirq.commutes(A(b), B) with pytest.raises(TypeError): assert cirq.commutes(A, B) cirq.testing.assert_commutes_magic_method_consistent_with_unitaries( A, B) with pytest.raises(TypeError): assert cirq.commutes(XGate, cirq.X**sympy.Symbol('e')) with pytest.raises(TypeError): assert cirq.commutes(XGate(a), 'Gate') assert cirq.commutes(XGate(a), 'Gate', default='default') == 'default' def test_operation_commutes_using_overlap_and_unitary(): class CustomCnotGate(cirq.Gate): def num_qubits(self) -> int: return 2 def _unitary_(self): return cirq.unitary(cirq.CNOT) custom_cnot_gate = CustomCnotGate() class CustomCnotOp(cirq.Operation): def __init__(self, *qs: cirq.Qid): self.qs = qs def _unitary_(self): return cirq.unitary(cirq.CNOT) @property def qubits(self): return 
self.qs def with_qubits(self, *new_qubits): raise NotImplementedError() class NoDetails(cirq.Operation): def __init__(self, *qs: cirq.Qid): self.qs = qs @property def qubits(self): return self.qs def with_qubits(self, *new_qubits): raise NotImplementedError() a, b, c = cirq.LineQubit.range(3) # If ops overlap with known unitaries, fallback to matrix commutation. assert not cirq.commutes(CustomCnotOp(a, b), CustomCnotOp(b, a)) assert not cirq.commutes(CustomCnotOp(a, b), CustomCnotOp(b, c)) assert cirq.commutes(CustomCnotOp(a, b), CustomCnotOp(c, b)) assert cirq.commutes(CustomCnotOp(a, b), CustomCnotOp(a, b)) # If ops don't overlap, they commute. Even when no specified unitary. assert cirq.commutes(CustomCnotOp(a, b), NoDetails(c)) # If ops overlap and there's no unitary, result is indeterminate. assert cirq.commutes(CustomCnotOp(a, b), NoDetails(a), default=None) is None # Same stuff works with custom gate, or mix of custom gate and custom op. assert cirq.commutes(custom_cnot_gate(a, b), CustomCnotOp(a, b)) assert cirq.commutes(custom_cnot_gate(a, b), custom_cnot_gate(a, b)) assert cirq.commutes(custom_cnot_gate(a, b), CustomCnotOp(c, b)) assert cirq.commutes(custom_cnot_gate(a, b), custom_cnot_gate(c, b)) assert not cirq.commutes(custom_cnot_gate(a, b), CustomCnotOp(b, a)) assert not cirq.commutes(custom_cnot_gate(a, b), custom_cnot_gate(b, a)) assert not cirq.commutes(custom_cnot_gate(a, b), CustomCnotOp(b, c)) assert not cirq.commutes(custom_cnot_gate(a, b), custom_cnot_gate(b, c))
# Advent of Code 2018 Day 13 # from operator import add import numpy as np # Read file and extract dependencies file = open("../inputs/Advent13", 'r') input = [row[:-1] for row in file] def visualise_grid(): outgrid = np.copy(grid) for a, b, c, d in carts: outgrid[b[0], b[1]] = c for i in range(gridsize): print("".join(outgrid[i])) gridsize = 150 grid = np.empty([gridsize, gridsize], dtype=str) carts = [] dir = [[-1, 0], [0, 1], [1, 0], [0, -1]] arrows = "^>v<" # populate grid with track and extract carts for x in range(gridsize): for y in range(len(input[x])): if input[x][y] in arrows: # [cartno[0], position[1], direction[2], next turn[3]] carts.append([len(carts), [x, y], arrows.index(input[x][y]), -1]) grid[x, y] = input[x][y].translate(str.maketrans(arrows, "|-|-")) else: grid[x, y] = input[x][y] numcarts = len(carts) tick = 0 crash = False while not crash: tick += 1 move_order = [y[0] for y in sorted(carts, key=lambda x: x[1])] for move in move_order: new_position = list(map(add, carts[move][1], dir[carts[move][2]])) newgrid = grid[new_position[0], new_position[1]] for k in range(numcarts): if carts[k][1] == new_position: crash_position = [new_position[1], new_position[0]] crash = True break if crash == True: break if newgrid == "/": # ^0 becomes >1, >1 becomes ^0, v2 becomes <3, <3 becomes v2 carts[move][2] = (5 - carts[move][2]) % 4 if newgrid == "\\": # ^0 becomes <3, >1 becomes v2, v2 becomes >1, <3 becomes ^0 carts[move][2] = (3 - carts[move][2]) % 4 if newgrid == "+": carts[move][2] = (carts[move][2] + carts[move][3]) % 4 # new direction carts[move][3] = (carts[move][3] + 2) % 3 - 1 # next direction carts[move][1] = new_position visualise_grid() print("Part 1 - Crash", crash_position, "evaluated after", tick, "iterations")
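# A small self-check of the corner-turn arithmetic used above (illustrative
# only): with directions indexed ^=0, >=1, v=2, <=3, a '/' corner maps
# d -> (5 - d) % 4 and a '\' corner maps d -> (3 - d) % 4.
assert [(5 - d) % 4 for d in range(4)] == [1, 0, 3, 2]  # ^ <-> >, v <-> <
assert [(3 - d) % 4 for d in range(4)] == [3, 2, 1, 0]  # ^ <-> <, > <-> v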
from __future__ import annotations from pentagram.parse.group import parse_group from pentagram.parse.line import parse_lines from pentagram.parse.statement import parse_statements_block from pentagram.parse.word import parse_word_lines from pentagram.syntax import SyntaxBlock def parse(source: str) -> SyntaxBlock: lines = parse_lines(source) word_lines = parse_word_lines(lines) group = parse_group(word_lines) return parse_statements_block(group)
from flask import Blueprint from flask import redirect from flask import render_template from flask import request from flask import url_for from flask_login import current_user from flask_login import login_required from flask_login import login_user from flask_login import logout_user from werkzeug.urls import url_parse from app.extensions import db from app.forms import LoginForm from app.forms import RegistrationForm from app.models import User import json import plotly server_bp = Blueprint('main', __name__) @server_bp.route('/') def index(): return render_template("index.html", title='Home Page') @server_bp.route('/login/', methods=['GET', 'POST']) def login(): if current_user.is_authenticated: return redirect(url_for('main.index')) form = LoginForm() if form.validate_on_submit(): user = User.query.filter_by(username=form.username.data).first() if user is None or not user.check_password(form.password.data): error = 'Invalid username or password' return render_template('login.html', form=form, error=error) login_user(user, remember=form.remember_me.data) next_page = request.args.get('next') if not next_page or url_parse(next_page).netloc != '': next_page = url_for('main.index') return redirect(next_page) return render_template('login.html', title='Sign In', form=form) @server_bp.route('/logout/') @login_required def logout(): logout_user() return redirect(url_for('main.index')) @server_bp.route('/register/', methods=['GET', 'POST']) def register(): if current_user.is_authenticated: return redirect(url_for('main.index')) form = RegistrationForm() if form.validate_on_submit(): user = User(username=form.username.data) user.set_password(form.password.data) db.session.add(user) db.session.commit() return redirect(url_for('main.login')) return render_template('register.html', title='Register', form=form) @server_bp.route('/analiz/') def analiz(): import pandas as pd df = pd.read_csv("/home/turan/Documents/repos/financial/data/result.csv", names=['index','date','hi','lo','close','open','high','low', 'real_hi','real_lo', 'pred_1', 'calc_1', 'rsi', 'rsi_t'], parse_dates=['date']) data = df[['date', 'open', 'high', 'low', 'close', 'hi', 'lo']].tail(5) ts = df[['date','close']].set_index('date') #ts = df['close'] graphs = [ dict( data=[ dict( x=ts.index, # Can use the pandas data structures directly y=ts.close ) ] ) ] graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder) ids = ['graph-{}'.format(i) for i, _ in enumerate(graphs)] return render_template('analiz.html', ids=ids, graphJSON=graphJSON)
from pathlib import Path import typer from cookiecutter.main import cookiecutter from grapl_common.utils.find_grapl_root import find_grapl_root from grapl_template_generator.rust_grpc_service.create_rust_grpc_service_args import ( CreateRustGrpcServiceArgs, ) from grapl_template_generator.rw_toml import ReadWriteToml from grapl_template_generator.workspace_toml_type import WorkspaceToml def camel_case_ify(input: str) -> str: output = "" for word in input.split(" "): output += word.capitalize() return output class RustGrpcServiceTemplateExecutor(object): def __init__(self, args: CreateRustGrpcServiceArgs) -> None: self.crate_name = camel_case_ify(args.crate_name.replace("-", " ")) self.project_slug = args.crate_name self.snake_project_name = self.project_slug.lower().replace("-", "_") self.snake_project_name_caps = ( self.snake_project_name.upper() ) # for env variables # TODO: In the future, it might prove more robust to package these # templates as a resources() goal, as opposed to just reading from src/ grapl_root = find_grapl_root() assert grapl_root, "Expected to find Grapl root" self.grapl_root = grapl_root self.rust_src_path = self.grapl_root / "src" / "rust" python_src_path = self.grapl_root / "src" / "python" self.template_path = ( python_src_path / "grapl-template-generator" / "grapl_template_generator" / "grapl-templates" / "rust-grpc-service" ) self.project_path = self.rust_src_path / self.project_slug # We also have to manually move the generated protos to a specific directory. self.proto_destination = ( self.grapl_root / "src/proto/graplinc/grapl/api" / self.snake_project_name ) self.update_cargo_toml = args.update_cargo_toml def precheck(self) -> None: pass def execute_template(self) -> None: cookiecutter( str(self.template_path), no_input=True, output_dir=self.rust_src_path, extra_context={ "crate_name": self.crate_name, "project_slug": self.project_slug, "snake_project_name": self.snake_project_name, "snake_project_name_caps": self.snake_project_name_caps, }, ) self.move_protos_to_global_proto_dir() def move_protos_to_global_proto_dir(self) -> None: self.proto_destination.mkdir(exist_ok=True) proto_filenames = [ f"{self.snake_project_name}.proto", f"{self.snake_project_name}_health.proto", ] for proto_filename in proto_filenames: proto_file = Path(self.project_path / "proto" / proto_filename).resolve( strict=True ) proto_file.rename(self.proto_destination / proto_filename) def get_toml_for_workspace(self) -> ReadWriteToml[WorkspaceToml]: workspace_path = self.rust_src_path / "Cargo.toml" return ReadWriteToml(workspace_path) def attach_to_workspace(self) -> None: # Theoretically, we could always automate this step. Unfortunately, the python # toml encoder/decoder doesn't want to play nicely with our comments. 
# https://github.com/uiri/toml/issues/371 new_workspace_member = f"./{self.project_slug}" if self.update_cargo_toml: workspace_toml = self.get_toml_for_workspace() workspace_toml.loaded_toml["workspace"]["members"].append( f"./{self.project_slug}" ) workspace_toml.loaded_toml["workspace"]["members"].sort() workspace_toml.write() else: typer.echo( f"NOTE: Please add {new_workspace_member} to cargo.toml's [workspace][members]" ) def check_workspace(self) -> None: workspace_toml = self.get_toml_for_workspace() for member in workspace_toml.loaded_toml["workspace"]["members"]: if member.endswith(self.project_slug): raise ValueError(f"Member already exists in workspace {member}") def create_rust_grpc_service(args: CreateRustGrpcServiceArgs) -> None: executor = RustGrpcServiceTemplateExecutor(args) executor.precheck() executor.execute_template() executor.attach_to_workspace()
class SplitRule: def __init__(self, attribute, split_value): self.attribute = attribute self.split_value = split_value def __str__(self): return "A" + str(self.attribute) + " > " + str(self.split_value)
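# A minimal usage sketch; the attribute index and threshold are made up.
if __name__ == "__main__":
    rule = SplitRule(attribute=2, split_value=3.5)
    print(rule)  # A2 > 3.5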
# Script for downloading a story from Truyencv
# It downloads the HTML of each chapter, extracts the text and appends it to Ketqua.txt
from bs4 import BeautifulSoup
import requests
import fileinput
import random
import time
import os

headers = {
    'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 5.0; App Runtime for Chrome Dev Build/54.5021.629.0)',
    'Accept-Encoding': 'gzip'
}

# Initialization
StartChapter = 1      # first chapter number
EndChapter = 1315     # last chapter number
x = StartChapter
strURLStory = 'Đường-dẫn-đến-chương-truyện'  # placeholder: URL of the story chapters
# Sample: strURLStory='https://truyencv.com/pokemon-he-thong-thanh-tuu-dai-su/chuong-'
filenameTXT = "Ketqua.txt"

while (x <= EndChapter):
    StrippedContent = ""
    ChapterURL = strURLStory + str(x) + '/'
    response = requests.get(ChapterURL, headers=headers)
    filenameHTML = str(x) + ".html"
    open(filenameHTML, 'wb').write(response.content)
    with open(filenameHTML, encoding="utf-8") as fp:
        soup = BeautifulSoup(fp, "lxml")
    # Save the chapter title
    StrippedContent = 'C' + str(x).zfill(4) + ' - ' + soup.title.string + '\n'
    div = soup.find(id="js-truyencv-content")
    # Replace <br> tags with newlines
    for elem in div.find_all("br"):
        elem.replace_with(elem.text + "\n")
    # Save the chapter content
    StrippedContent = StrippedContent + '\n' + div.text
    with open(filenameTXT, 'a', encoding="utf-8") as handle:
        handle.write(StrippedContent)
    print('Downloaded (' + str(x) + '/' + str(EndChapter) + ')')
    x += 1
    os.remove(filenameHTML)
    # Pause for a moment
    SleepTime = random.randint(5, 10)
    time.sleep(SleepTime)
print('Done!')
import numpy as np import matplotlib.pyplot as plt import sys from pdb import set_trace np.random.seed(0) if len(sys.argv) < 2: print("Usage: python xx.py fpath fig_title out_file_name") exit(0) fpath = sys.argv[1] data = np.loadtxt(fpath, delimiter=',') nrows = 6 ncols = 8 idxes = np.random.choice(data.shape[0], nrows*ncols) counter = 1 for i in idxes: row = data[i,:] pixels = row.reshape(28,28)*255 pixels = np.clip(pixels,0,255) # fix predictions pixels = pixels.astype(np.uint8) plt.subplot(nrows, ncols, counter) plt.imshow(pixels, cmap='gray') counter += 1 ax = plt.gca() ax.axes.xaxis.set_visible(False) ax.axes.yaxis.set_visible(False) ax.axes.xaxis.set_ticklabels([]) ax.axes.yaxis.set_ticklabels([]) if len(sys.argv) == 2: name = sys.argv[1].split('.')[0] plt.savefig(name + '.jpg') else: plt.suptitle(sys.argv[2]) plt.savefig(sys.argv[3]) # pick something that is different across model
from web.app import db
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from web.app import login_manager
import telegram.chat.states as states
from sqlalchemy import or_
from time import time
import jwt
from flask import current_app


@login_manager.user_loader
def load_user(user_id):
    return User.query.get(int(user_id))


class User(db.Model, UserMixin):
    __tablename__ = 'users'

    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(100))
    username = db.Column(db.String(50), nullable=False, unique=True)
    email = db.Column(db.String(100), nullable=False, unique=True)
    password_hash = db.Column(db.String(100), nullable=False)
    created_on = db.Column(db.DateTime(), default=datetime.utcnow)
    updated_on = db.Column(db.DateTime(), default=datetime.utcnow, onupdate=datetime.utcnow)
    telegram_id = db.Column(db.Integer())
    telegram_nickname = db.Column(db.String(100))
    lms_id = db.Column(db.Integer())
    flag_emails_from_default_mail = db.Column(db.Boolean(), nullable=False, default=False)
    flag_is_messages_from_bot_is_delivered = db.Column(db.Boolean(), nullable=False, default=True)

    courses = db.relationship('Course', backref='author', lazy='dynamic')

    def __repr__(self):
        return "<User {}>".format(self.id)

    def set_password(self, password):
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password_hash, password)

    def get_all_not_deleted_courses(self):
        return self.courses.filter_by(deleted=False)

    @staticmethod
    def get_all_users():
        return User.query.all()

    def get_course_by_name(self, name):
        return self.get_all_not_deleted_courses().filter_by(name=name).first()

    def get_course_by_lms_id(self, lms_id):
        return self.get_all_not_deleted_courses().filter_by(lms_id=lms_id).first()

    def get_reset_password_token(self, expires_in=600):
        # Note: with PyJWT >= 2.0, jwt.encode() already returns a str, so the
        # trailing .decode('utf-8') is only needed on PyJWT 1.x.
        return jwt.encode(
            {'reset_password': self.id, 'exp': time() + expires_in},
            current_app.config['SECRET_KEY'],
            algorithm='HS256'
        ).decode('utf-8')

    @staticmethod
    def verify_reset_password_token(token):
        try:
            id = jwt.decode(token, current_app.config['SECRET_KEY'],
                            algorithms=['HS256'])['reset_password']
        except Exception:
            return None
        return User.query.get(id)


class Course(db.Model):
    __tablename__ = 'courses'

    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(140), nullable=False)
    deleted = db.Column(db.Boolean(), nullable=False, default=False)
    lms_id = db.Column(db.Integer(), nullable=False)
    trainer_telegram_id = db.Column(db.Integer(), nullable=False)
    trainer_lms_id = db.Column(db.Integer(), nullable=False)
    num_of_blocks = db.Column(db.Integer(), nullable=False, default=1)
    number_of_homeworks = db.Column(db.Integer(), nullable=False)
    is_certificate_needed = db.Column(db.Boolean(), nullable=False, default=False)
    default_num_days = db.Column(db.Integer(), nullable=False, default=30)
    created_on = db.Column(db.DateTime(), default=datetime.utcnow)
    updated_on = db.Column(db.DateTime(), default=datetime.utcnow, onupdate=datetime.utcnow)
    review_link = db.Column(db.String(300), nullable=False)
    # Default help text (Russian): "Unfortunately, there are no supplementary
    # materials for this course."
    help = db.Column(db.Text(), nullable=False,
                     default="К сожалению, к данному курсу отсутствуют какие-либо вспомогательные материалы.")

    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)

    blocks = db.relationship('CourseBlock', backref='course', lazy='dynamic')
    homeworks = db.relationship('Homework', backref='course', lazy='dynamic')
    students = db.relationship('Student', backref='course', lazy='dynamic')

    def __repr__(self):
        return '<Course {}>'.format(self.id)

    @staticmethod
    def delete_course_by_id(course_id):
        Course.query.filter_by(id=course_id).first().deleted = True

    @staticmethod
    def get_course_by_id(course_id):
        return Course.query.filter_by(id=course_id).first()

    def get_all_not_deleted_students(self):
        return self.students.filter_by(deleted=False)

    def get_all_not_deleted_blocks(self):
        return self.blocks.filter_by(deleted=False)

    def get_all_not_deleted_active_students(self):
        return self.get_all_not_deleted_students().filter_by(status=Student.student_statuses["active"])

    def get_all_not_deleted_freezed_students(self):
        return self.get_all_not_deleted_students().filter_by(status=Student.student_statuses["freezed"])

    def get_all_not_deleted_finished_students(self):
        return self.get_all_not_deleted_students().filter_by(status=Student.student_statuses["finished"])

    def get_all_not_deleted_dropped_students(self):
        return self.get_all_not_deleted_students().filter_by(status=Student.student_statuses["dropped"])

    @staticmethod
    def get_all_not_deleted_courses():
        return Course.query.filter_by(deleted=False)

    @staticmethod
    def get_course_by_lms_id(lms_id):
        return Course.get_all_not_deleted_courses().filter_by(lms_id=lms_id).first()

    @staticmethod
    def get_course_by_name(name):
        return Course.get_all_not_deleted_courses().filter_by(name=name).first()

    def get_not_deleted_student_by_email(self, email):
        return self.get_all_not_deleted_students().filter_by(email=email).first()

    def get_not_deleted_student_by_lms_id(self, lms_id):
        return self.get_all_not_deleted_students().filter_by(lms_id=lms_id).first()

    @staticmethod
    def find_students_by_search_param(students, search_param):
        return students.filter(or_(Student.name == search_param,
                                   Student.email == search_param,
                                   Student.lms_email == search_param,
                                   Student.telegram_id == search_param))

    def delete_block_by_num(self, num):
        self.get_all_not_deleted_blocks().filter_by(number=num).first().deleted = True

    def get_block_by_num(self, num):
        return self.get_all_not_deleted_blocks().filter_by(number=num).first()

    def get_all_not_deleted_homeworks(self):
        return self.homeworks.filter_by(deleted=False)

    def delete_homework_by_num(self, num):
        self.get_all_not_deleted_homeworks().filter_by(number=num).first().deleted = True

    def get_homework_by_num(self, num):
        return self.get_all_not_deleted_homeworks().filter_by(number=num).first()

    def get_homework_by_short_name(self, short_name):
        return self.get_all_not_deleted_homeworks().filter_by(short_name=short_name).first()


class CourseBlock(db.Model):
    __tablename__ = 'course_blocks'

    id = db.Column(db.Integer(), primary_key=True)
    number = db.Column(db.Integer(), nullable=False)
    # lms_id of the task that must be completed before this block unlocks.
    # If null, the block is open by default.
    required_task_lms_id = db.Column(db.Integer())
    # Materials for the block. May be absent if they are not configured.
    link = db.Column(db.String(100))
    deleted = db.Column(db.Boolean(), nullable=False, default=False)

    course_id = db.Column(db.Integer, db.ForeignKey('courses.id'), nullable=False)

    check = db.relationship('Check', backref='block', uselist=False)

    def __repr__(self):
        return "<CourseBlock {}>".format(self.id)


class Homework(db.Model):
    __tablename__ = 'homeworks'

    id = db.Column(db.Integer(), primary_key=True)
    lms_id = db.Column(db.Integer())
    short_name = db.Column(db.String(100))
    answer_link = db.Column(db.String(100))
    deleted = db.Column(db.Boolean(), nullable=False, default=False)
    # Hidden field needed to correctly delete and re-add homeworks when their number changes.
    number = db.Column(db.Integer(), nullable=False)

    course_id = db.Column(db.Integer, db.ForeignKey('courses.id'), nullable=False)

    def __repr__(self):
        return "<Homework {}>".format(self.id)


class Student(db.Model):
    student_statuses = {"active": "active",
                        "finished": "finished",
                        "dropped": "dropped",
                        "freezed": "freezed"}

    __tablename__ = 'students'

    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    email = db.Column(db.String(100), nullable=False)
    lms_email = db.Column(db.String(100), nullable=False)
    status = db.Column(db.String(100), nullable=False, default=student_statuses["active"])
    number_of_days = db.Column(db.Integer(), nullable=False)
    lms_id = db.Column(db.Integer(), nullable=False)
    registration_code = db.Column(db.String(100), nullable=False)
    telegram_id = db.Column(db.Integer())
    deleted = db.Column(db.Boolean(), nullable=False, default=False)
    created_on = db.Column(db.DateTime(), default=datetime.utcnow)
    updated_on = db.Column(db.DateTime(), default=datetime.utcnow, onupdate=datetime.utcnow)
    cert_link = db.Column(db.String(100))

    course_id = db.Column(db.Integer, db.ForeignKey('courses.id'), nullable=False)

    checks = db.relationship('Check', backref='student', lazy='dynamic')

    def __repr__(self):
        return "<Student {}>".format(self.id)

    @staticmethod
    def delete_student_by_id(student_id):
        Student.get_student_by_id(student_id).deleted = True

    @staticmethod
    def get_student_by_id(student_id):
        return Student.query.filter_by(id=student_id).first()

    @staticmethod
    def freeze_or_unfreeze_student_by_id(student_id):
        student = Student.get_student_by_id(student_id)
        if student.status == Student.student_statuses["freezed"]:
            student.status = Student.student_statuses["active"]
        else:
            student.status = Student.student_statuses["freezed"]

    @staticmethod
    def finish_or_unfinish_student_by_id(student_id):
        student = Student.get_student_by_id(student_id)
        if student.status == Student.student_statuses["finished"]:
            student.status = Student.student_statuses["active"]
        else:
            student.status = Student.student_statuses["finished"]

    @staticmethod
    def drop_or_undrop_student_by_id(student_id):
        student = Student.get_student_by_id(student_id)
        if student.status == Student.student_statuses["dropped"]:
            student.status = Student.student_statuses["active"]
        else:
            student.status = Student.student_statuses["dropped"]

    @staticmethod
    def add_days_to_student(student_id):
        student = Student.get_student_by_id(student_id)
        course = Course.get_course_by_id(student.course_id)
        student.number_of_days += course.default_num_days
        return student.number_of_days

    def get_all_not_deleted_checks(self):
        return self.checks.filter_by(deleted=False)

    def return_color_of_td(self):
        # Background color for the student's table cell (named color or hex without '#').
        if self.status == Student.student_statuses["dropped"]:
            return "gray"
        elif self.status == Student.student_statuses["finished"]:
            return "9966cc"
        elif self.status == Student.student_statuses["freezed"]:
            return "aqua"
        else:
            if self.number_of_days < 0:
                return "ffc0cb"
            else:
                return "98ff98"


class TelegramState(db.Model):
    __tablename__ = 'telegram_states'

    id = db.Column(db.Integer(), primary_key=True)
    telegram_id = db.Column(db.Integer(), nullable=False, unique=True)
    state = db.Column(db.String(100), nullable=False, default=states.START)
    temp_lms_email = db.Column(db.String(100))
    temp_course_register_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
    current_course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
    # id of the student record associated with this particular telegram id.
    # A new Student is created for every course, even though it may be the same person.
    # Creating a separate student per course greatly simplifies the system, but here it is a bit of a workaround.
    # After verification, the student instance gets the telegram id stored in its own field.
    temp_course_student_id = db.Column(db.Integer, db.ForeignKey('students.id'))

    def __repr__(self):
        return "<TelegramState {}>".format(self.id)


class Check(db.Model):
    __tablename__ = 'checks'

    id = db.Column(db.Integer(), primary_key=True)
    link = db.Column(db.String(100), nullable=False)
    amount = db.Column(db.Integer(), nullable=False)
    created_on = db.Column(db.DateTime(), default=datetime.utcnow)
    updated_on = db.Column(db.DateTime(), default=datetime.utcnow, onupdate=datetime.utcnow)
    # A payment can be for a course block or for some other service.
    another = db.Column(db.String(50), nullable=True)

    block_id = db.Column(db.Integer, db.ForeignKey('course_blocks.id'), nullable=True)
    student_id = db.Column(db.Integer, db.ForeignKey('students.id'), nullable=False)
    deleted = db.Column(db.Boolean(), nullable=False, default=False)

    def __repr__(self):
        return "<Check {}>".format(self.id)

    @staticmethod
    def delete_check_by_id(check_id):
        Check.query.filter_by(id=check_id).first().deleted = True
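
# Usage sketch (illustrative, not part of the original models file): issuing and
# verifying a password-reset token with the methods defined above. Assumes it runs
# inside a Flask application context so current_app.config['SECRET_KEY'] is available.
def example_password_reset_roundtrip(user):
    token = user.get_reset_password_token(expires_in=600)
    # Returns the same User while the token is valid and unexpired, otherwise None.
    return User.verify_reset_password_token(token)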
""" matread1.py A demo program for reading in a matrix then printing it out """ from __future__ import print_function import sys import numpy from np_helper import loadmatrix1, printmatrix1 def read1(argv): if len(argv) < 2: print("Needs an input file name on arg1", file=sys.stderr) sys.exit(1) matfile = argv[1] M = loadmatrix1(matfile) #printmatrix1(M, float_fmt=" %9.6g") printmatrix1(M) if __name__ == "__main__": read1(sys.argv)