# AIPrompts
This is a simple prompt builder for OpenAI models. Easy to use and to modify.
## Install
`pip install AIPrompts`
`pip install AIPrompts@git+https://github.com/TeiaLabs/prompts.git@master`
## Dynamic prompts
```python
template = 'a photo of a <img_label>'
prompt = DynamicPrompt(template)
filled_prompt = prompt.build(img_label='dog')
print(filled_prompt)
# out: "a photo of a dog"
```
## Dynamic prompts from file templates
Build your own prompt by creating a file that follows the sample.prompt format (YAML), and use the DynamicPrompt class to parse it:
```python
prompt = DynamicPrompt.from_file('samples/sample.prompt')
str_prompt = prompt.build(
input_sentence="lets go",
)
```
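A minimal sketch of such a file, assuming the YAML keys mirror `DynamicPrompt`'s constructor arguments (`name`, `description`, `template`, `template_vars`, `settings`); the field values below are illustrative:
```yaml
name: sample
description: completes a sentence
template: "complete the following sentence: <input_sentence>"
template_vars:
  - input_sentence
settings:
  engine: text-davinci-003
  temperature: 0.7
```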
You can also access recommended model settings (engine, temperature) that can be fed to the model input (e.g., openai.Completion.create()):
```python
prompt.get_model_settings()
```
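For example, a minimal sketch of passing those settings to the legacy OpenAI SDK, assuming `get_model_settings()` returns keyword arguments accepted by `openai.Completion.create` (e.g. `engine`, `temperature`):
```python
import openai

settings = prompt.get_model_settings()  # e.g. {"engine": ..., "temperature": ...}
response = openai.Completion.create(prompt=str_prompt, **settings)
print(response["choices"][0]["text"])
```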
## Improve Autocomplete with custom prompts
Alternatively, to get more control and better autocomplete suggestions, you can inherit from the `BasePrompt` class and override the build method with explicit arguments:
```python
class MyPrompt(BasePrompt):
def build(self, input_sentence):
return self.set_prompt_values(
input_sentence=input_sentence,
)
```
## Ensembling prompts
To ensemble multiple prompts, you can use the `PromptEnsemble` class:
```python
templates = [
'<label>',
'a photo of <label>',
'picture of <label>',
]
exp_vars = ['label']
prompt = PromptEnsemble(templates, exp_vars)
prompt.build(label='dog')
# out: ['dog', 'a photo of dog', 'picture of dog']
prompt.build(label='cat')
# out: ['cat', 'a photo of cat', 'picture of cat']
```
The output is a flattened list with all filled in templates.
Note: all templates must be filled with the same expected variables, and all variables must be provided.
You can also build multiple prompts at the same time (useful for classification):
```python
templates = [
'<label>',
'a photo of <label>'
]
template_vars = [
'label'
]
labels = ['dog', 'cat', 't-shirt']
prompt = PromptEnsemble(templates, template_vars)
prompt.build_many(
label=labels
)
# out: ['dog', 'a photo of dog', 'cat', 'a photo of cat', 't-shirt', 'a photo of t-shirt']
```
| AIPrompts | /AIPrompts-0.5.4.tar.gz/AIPrompts-0.5.4/README.md | README.md |
import copy
import yaml
from .dynamic import DynamicPrompt
from .exceptions import TemplateNotInPromptError
from .schemas import (
ChatMLMessage,
OpenAIModelSettings,
PromptRole,
Template,
TemplateInputs,
TurboSchema,
)
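# Accepted template forms for each chat role; TurboPrompt normalizes them into a
# {template_name: DynamicPrompt} mapping (see __format_prompt_template below).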
TEMPLATE_TYPE = list[Template] | DynamicPrompt | str | None
class TurboPrompt:
def __init__(
self,
system_templates: TEMPLATE_TYPE = None,
user_templates: TEMPLATE_TYPE = None,
assistant_templates: TEMPLATE_TYPE = None,
settings: OpenAIModelSettings | dict | None = None,
name: str = "",
description: str | None = None,
):
self.default_template = "default"
if isinstance(settings, dict):
settings = OpenAIModelSettings(**settings)
self.system_prompt = self.__format_prompt_template(system_templates)
self.user_prompt = self.__format_prompt_template(user_templates)
self.assistant_prompt = self.__format_prompt_template(assistant_templates)
self.settings: OpenAIModelSettings | None = settings
self.name = name
self.description = description
self.prompts = []
def __format_prompt_template(
self, template: TEMPLATE_TYPE
) -> dict[str, DynamicPrompt]:
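        # Normalize any accepted form (None, str, DynamicPrompt, or list[Template])
        # into a {template_name: DynamicPrompt} mapping; unnamed templates are
        # stored under the "default" key.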
if template is None:
template = "<message>"
if isinstance(template, str):
template = DynamicPrompt(template)
if isinstance(template, DynamicPrompt):
template = {self.default_template: template} # type: ignore
if isinstance(template, list):
template = {
t.template_name: DynamicPrompt(t.template) for t in template
} # type: ignore
return template # type: ignore
def add_user_template(self, template_name: str, template: str | DynamicPrompt):
if isinstance(template, str):
template = DynamicPrompt(template)
self.user_prompt[template_name] = template
def add_system_template(self, template_name: str, template: str | DynamicPrompt):
if isinstance(template, str):
template = DynamicPrompt(template)
self.system_prompt[template_name] = template
def add_assistant_template(self, template_name: str, template: str | DynamicPrompt):
if isinstance(template, str):
template = DynamicPrompt(template)
self.assistant_prompt[template_name] = template
def add_user_message(
self,
name: str | None = None,
template_name: str | None = None,
**kwargs,
):
if template_name is None:
template_name = self.default_template
try:
prompt = self.user_prompt[template_name].build(**kwargs)
except KeyError:
raise TemplateNotInPromptError(f"Template {template_name} not found")
self._add_prompt(
prompt_type=PromptRole.USER,
prompt=prompt,
name=name,
)
def add_system_message(
self,
name: str | None = None,
template_name: str | None = None,
**kwargs,
):
if template_name is None:
template_name = self.default_template
try:
prompt = self.system_prompt[template_name].build(**kwargs)
except KeyError:
raise TemplateNotInPromptError(f"Template{template_name} not found")
self._add_prompt(
prompt_type=PromptRole.SYSTEM,
prompt=prompt,
name=name,
)
def add_assistant_message(
self,
name: str | None = None,
template_name: str | None = None,
**kwargs,
):
if template_name is None:
template_name = self.default_template
try:
prompt = self.assistant_prompt[template_name].build(**kwargs)
except KeyError:
raise TemplateNotInPromptError(f"Template {template_name} not found")
self._add_prompt(
prompt_type=PromptRole.ASSISTANT,
prompt=prompt,
name=name,
)
def _add_prompt(
self,
prompt_type: PromptRole,
prompt: str,
name: str | None = None,
):
prompt_message = {
"role": prompt_type.value,
"content": prompt,
}
if name is not None:
prompt_message["name"] = name
self.prompts.append(prompt_message)
def build(self, **_) -> list[dict[str, str]]:
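        # Extra keyword arguments are accepted and ignored (**_); returns a deep
        # copy of the accumulated ChatML-style message dicts.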
return copy.deepcopy(self.prompts)
def add_raw_content(self, content_item: dict | ChatMLMessage):
if isinstance(content_item, dict):
content_item = ChatMLMessage(**content_item)
self._add_prompt(
prompt_type=content_item.role,
prompt=content_item.content,
name=content_item.name,
)
def clear(self):
self.prompts.clear()
@classmethod
def from_turbo_schema(cls, prompt_schema: TurboSchema):
turbo_prompt = cls(
name=prompt_schema.name,
description=prompt_schema.description,
settings=prompt_schema.settings,
)
turbo_prompt.add_template(
prompt_schema.system_templates, type=PromptRole.SYSTEM
)
turbo_prompt.add_template(prompt_schema.user_templates, type=PromptRole.USER)
turbo_prompt.add_template(
prompt_schema.assistant_templates, type=PromptRole.ASSISTANT
)
turbo_prompt.add_initial_template_data(
turbo_prompt, prompt_schema.initial_template_data
)
return turbo_prompt
def add_template(
self,
template: list[Template] | str,
type: PromptRole = PromptRole.ASSISTANT,
) -> None:
turbo_add_template_fn = {
PromptRole.ASSISTANT: self.add_assistant_template,
PromptRole.USER: self.add_user_template,
PromptRole.SYSTEM: self.add_system_template,
}
if isinstance(template, str):
turbo_add_template_fn[type](template_name="default", template=template)
elif isinstance(template, list):
for p in template:
turbo_add_template_fn[type](
template_name=p.template_name, template=p.template
)
else:
raise ValueError(
f"{type}_prompt must be a string or a list of strings/prompts"
)
def add_initial_template_data(
self,
prompt: "TurboPrompt",
initial_template_data: list[TemplateInputs | ChatMLMessage] | None,
) -> None:
if initial_template_data is None:
return
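        # Each entry is either a ready-made ChatMLMessage (added verbatim) or a dict
        # whose "inputs" are rendered with the template matching its role.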
for hist in initial_template_data:
if isinstance(hist, ChatMLMessage):
prompt.add_raw_content(hist)
continue
if hist["role"] == PromptRole.SYSTEM:
prompt.add_system_message(
template_name=hist.get("template_name"), **hist["inputs"]
)
elif hist["role"] == PromptRole.USER:
prompt.add_user_message(
template_name=hist.get("template_name"), **hist["inputs"]
)
elif hist["role"] == PromptRole.ASSISTANT:
prompt.add_assistant_message(
template_name=hist.get("template_name"), **hist["inputs"]
)
else:
raise ValueError(
f"Invalid role in initial_template_data: {hist['role']}"
)
@classmethod
def from_file(cls, file_path: str):
with open(file_path, "r") as f:
prompt_data = yaml.safe_load(f)
tb = TurboSchema(**prompt_data)
return cls.from_turbo_schema(tb)
@classmethod
def from_settings(
cls,
name: str,
description: str,
settings: OpenAIModelSettings,
initial_template_data: list[TemplateInputs] | list[ChatMLMessage],
system_template: list[Template] | str = "",
user_template: list[Template] | str = "",
assistant_template: list[Template] | str = "",
):
tbs = TurboSchema(
name=name,
description=description,
system_templates=system_template,
user_templates=user_template,
assistant_templates=assistant_template,
initial_template_data=initial_template_data,
settings=settings,
)
return cls.from_turbo_schema(tbs)
def to_dynamic(self) -> DynamicPrompt:
prompts = []
template_vars = set()
for prompt in self.system_prompt.values():
prompts.append(prompt.template)
template_vars.update(prompt.template_vars or [])
for prompt in self.user_prompt.values():
prompts.append(prompt.template)
template_vars.update(prompt.template_vars or [])
for prompt in self.assistant_prompt.values():
prompts.append(prompt.template)
template_vars.update(prompt.template_vars or [])
return DynamicPrompt(
name=self.name,
template="\n".join(prompts),
template_vars=list(template_vars) or None,
        )
| AIPrompts | /AIPrompts-0.5.4.tar.gz/AIPrompts-0.5.4/prompts/turbo.py | turbo.py |
from typing import TYPE_CHECKING
from .exceptions import UndefinedVariableError
from .schemas import DynamicSchema, OpenAIModelSettings
from .utils import load_yaml
if TYPE_CHECKING:
from .turbo import TurboPrompt
class DynamicPrompt:
"""
DynamicPrompt.
>>> template = "this is a <dog>"
>>> template_vars = ['dog']
>>> prompt = DynamicPrompt(template, template_vars)
>>> prompt.build(dog="cat")
'this is a cat'
"""
def __init__(
self,
template: str,
template_vars: list[str] | None = None,
name: str = "",
description: str = "",
settings: OpenAIModelSettings | dict[str, str] | None = None,
):
self.name = name
self.description = description
self.template = template
self.template_vars = template_vars
if isinstance(settings, dict):
settings = OpenAIModelSettings(**settings)
self.settings: OpenAIModelSettings | None = settings
def build(self, strict=True, **kwargs):
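        # Replace each <var> placeholder with its value; in strict mode, raise if a
        # supplied variable has no matching placeholder in the template.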
prompt = self.template
for var, value in kwargs.items():
pattern = f"<{var}>"
if pattern not in prompt and strict:
raise UndefinedVariableError(
message=f"Variable {var} was not found in prompt (expected vars={self.template_vars})."
)
prompt = prompt.replace(pattern, value)
return prompt
def to_turbo(self) -> "TurboPrompt":
from .turbo import TurboPrompt
return TurboPrompt(system_templates=self)
@classmethod
def from_file(cls, prompt_file: str) -> "DynamicPrompt":
prompt = load_yaml(prompt_file)
schema = DynamicSchema(**prompt)
return cls(**schema.dict())
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
f"name={self.name}, "
f"description={self.description}, "
f'template="""{self.template}""", '
f"template_vars={self.template_vars}, "
f"settings={self.settings}"
) | AIPrompts | /AIPrompts-0.5.4.tar.gz/AIPrompts-0.5.4/prompts/dynamic.py | dynamic.py |
from __future__ import annotations
from typing import Optional, Type
from .dynamic import DynamicPrompt
from .exceptions import ArgumentNumberOfElementsError, ExpectedVarsArgumentError
class PromptEnsemble:
def __init__(
self,
templates: list[str],
expected_vars: Optional[list[str]] = None,
prompt_class: Type[DynamicPrompt] = DynamicPrompt,
):
"""
Args:
templates: templates with placeholder variable names
expected_vars: variables expected in all templates
prompt_class: allows custom prompt classes
Examples:
>>> templates = ["a photo of a <class>", "picture of <class>"]
>>> expected_vars = ["class"]
>>> prompt = PromptEnsemble(templates, expected_vars)
"""
self.prompts = []
for template in templates:
if isinstance(template, str):
if expected_vars is None:
raise ExpectedVarsArgumentError(
"expected_vars argument is mandatory when using string templates"
)
prompt = prompt_class(
name="",
description="",
template=template,
template_vars=expected_vars,
)
self.prompts.append(prompt)
else:
self.prompts.append(template)
def build(self, **kwargs) -> list:
"""
Example:
```
build(
label='dog',
superclass='animal',
)
```
"""
filled_prompts = []
for prompt in self.prompts:
filled_prompts.append(prompt.build(**kwargs))
return filled_prompts
def build_many(self, **kwargs) -> list:
"""
Example:
```
build_many(
label=['dog', 'cat', 't-shirt'],
superclass=['animal', 'animal', 'clothes']
)
```
"""
strict = kwargs.pop("strict", False)
var_names = list(kwargs.keys())
n_vars = len(kwargs[var_names[0]])
ns = set([len(v) for v in kwargs.values()])
if len(ns) > 1:
raise ArgumentNumberOfElementsError(
f"All arguments must have the same number of elements."
f"Current element sizes: {ns}"
)
vars_to_fill = [
{var_name: kwargs[var_name][i] for var_name in var_names}
for i in range(n_vars)
]
filled_prompts = [
prompt.build(**var_fill, strict=strict)
for var_fill in vars_to_fill
for prompt in self.prompts
]
return filled_prompts
@classmethod
def from_paths(cls, paths: list[str], prompt_class=DynamicPrompt):
prompts = []
for path in paths:
prompts.append(prompt_class.from_file(path))
return cls(prompts, None)
def __len__(self) -> int:
        return len(self.prompts)
| AIPrompts | /AIPrompts-0.5.4.tar.gz/AIPrompts-0.5.4/prompts/ensemble.py | ensemble.py |
# AI RPA Library
This library supplements RPA libraries for Robot Framework. The features/functions included in this library are as follows:
1. Read Config
Used to read a configuration file in dictionary format named config.json; this function also adds dictionary entries that do not yet exist in config.json, taking a JSON object as the input parameter.
2. Timeout
Used to apply a timeout to a process before it continues; the timeout relies on Python's multiprocessing module.
3. Send Email
Used to send email via an API.
| AIRPALibrary | /AIRPALibrary-1.0.5.tar.gz/AIRPALibrary-1.0.5/README.md | README.md |
from PySide2 import QtCore
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import sys
import datetime
import time
from AIRPrediction import *
"""
.. module:: sampleapp
:synopsis: The sample app that demonstrates the functionality of the AIRPrediction framework. Creates a UI that
accepts user input and calls the functions in the AIRPrediction to utilize the Time Series Models to produce
predictions.
.. moduleauthors:: Colin Naehr <[email protected]>, Daniel Casto <[email protected]>, Derek Pena <[email protected]>,
Haotian Wang <[email protected]>
"""
class MainWindow(QWidget):
""" Class that holds all of the UI elements of the sample app and their functionality.
Parent class: QWidget
"""
def __init__(self):
""" Initializes the UI elements of the sample app.
"""
super().__init__()
self.setGeometry(0, 0, 1000, 700)
self.setWindowTitle("Pollutant Forecaster")
self.grid = QGridLayout()
self.setLayout(self.grid)
self.pollutant_label = QLabel('Enter Pollutant (NO2, O3, SO2, or CO):')
self.pollutant_edit = QLineEdit()
self.city_label = QLabel('Enter City: ')
self.city_edit = QLineEdit()
self.county_label = QLabel('Enter County: ')
self.county_edit = QLineEdit()
self.state_label = QLabel('Enter State: ')
self.state_edit = QLineEdit()
self.end_date_label = QLabel('Enter Future Date (MM/DD/YYYY): ')
self.end_date_edit = QLineEdit()
selector = QLabel('Select a forecasting method below:')
self.radiobtn1 = QRadioButton('Prophet Model')
self.radiobtn1.setMinimumHeight(40)
self.radiobtn2 = QRadioButton('Arima')
self.radiobtn2.setMinimumHeight(40)
self.radiobtn3 = QRadioButton('Compare Both Models')
self.radiobtn3.setMinimumHeight(40)
self.msg_text = QLabel()
self.button_one = QPushButton("Get Results")
self.grid.addWidget(self.pollutant_label, 0, 0)
self.grid.addWidget(self.pollutant_edit, 0, 1)
self.grid.addWidget(self.state_label, 1, 0)
self.grid.addWidget(self.state_edit, 1, 1)
self.grid.addWidget(self.county_label, 2, 0)
self.grid.addWidget(self.county_edit, 2, 1)
self.grid.addWidget(self.city_label, 3, 0)
self.grid.addWidget(self.city_edit, 3, 1)
self.grid.addWidget(self.end_date_label, 4, 0)
self.grid.addWidget(self.end_date_edit, 4, 1)
self.grid.addWidget(selector, 5, 0)
self.grid.addWidget(self.radiobtn1, 6, 0)
self.grid.addWidget(self.radiobtn2, 6, 1)
self.grid.addWidget(self.radiobtn3, 7, 0)
self.msg_text.setMaximumSize(1000, 50)
self.grid.addWidget(self.msg_text, 5, 1)
self.button_one.clicked.connect(self.__submit_input)
self.grid.addWidget(self.button_one, 8, 1)
self.show()
def __submit_input(self):
""" Reads input and outputs state updates informing the user of errors, progress, and results. Functions from
AIRPrediction are called to validate input and call on the prediction models.
Called when submit button is pressed.
:return: None
"""
self.msg_text.setText('')
QApplication.processEvents()
pl = self.pollutant_edit.text()
state = self.state_edit.text()
county = self.county_edit.text()
city = self.city_edit.text()
date_feature_available = True
if date_feature_available:
ed = self.end_date_edit.text()
try:
self.msg_text.setText('Validating Input... (Fields cannot be edited at this time)')
QApplication.processEvents()
validate, return_message, date_string = validate_input(pl, state, county, city, ed)
if self.radiobtn1.isChecked():
if validate:
self.msg_text.setText('Input Validation Success! Running Prophet Model... (Fields cannot be edited at this time)')
QApplication.processEvents()
prophet_result, pollutant_unit = prophet_prediction(pl, state, county, city, date_string)
self.msg_text.setText(f'The forecast for {pl} in {city}, {county}, {state} is {prophet_result} {pollutant_unit}')
else:
self.msg_text.setText(return_message)
elif self.radiobtn2.isChecked():
if validate:
self.msg_text.setText('Input Validation Success! Running ARIMA Model... (Fields cannot be edited at this time)')
QApplication.processEvents()
arima_result, pollutant_unit = arima_prediction(pl, state, county, city, date_string)
self.msg_text.setText(f'The forecast for {pl} in {city}, {county}, {state} is {arima_result} {pollutant_unit}')
else:
self.msg_text.setText(return_message)
elif self.radiobtn3.isChecked():
if validate:
self.msg_text.setText('Input Validation Success! Running Model Comparison... (Fields cannot be edited at this time)')
QApplication.processEvents()
results = compare_models(pl, state, county, city, date_string)
self.msg_text.setText(f'Prophet Prediction: {results[0]} {results[1]} Time: {results[2]} seconds'
f'\nARIMA Prediction: {results[3]} {results[4]} Time: {results[5]} seconds.')
else:
self.msg_text.setText(return_message)
else:
self.msg_text.setText(f'Please select a prediction method.')
except:
self.msg_text.setText('Error: something went wrong in prediction')
print('Error: something went wrong in prediction')
def main():
""" The sample app that demonstrates the functionality of the AIRPrediction framework. Creates a UI that
accepts user input and calls the functions in the AIRPrediction to utilize the Time Series Models to produce
predictions. main () generates the window the application is hosted on.
Module Authors: Colin Naehr <[email protected]>, Daniel Casto <[email protected]>, Derek Pena <[email protected]>,
Haotian Wang <[email protected]>
:return: None
"""
app = QApplication(sys.argv)
main_window = MainWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
    main()
| AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/sampleapp.py | sampleapp.py |
# AIRPrediction
## Contributors
Daniel Casto, Colin Naehr, Derek Pena, and Haotian Wang
## Special Thanks
Yuko Matsumoto - Pitching the idea for AIRPrediction, forming Data for Good by UF, and performing extensive research into the Time Series Models (Prophet and ARIMA).
## Description
A framework providing an air quality forecast model that leverages Time Series Models to enable timely preventive measures and reduce harmful impacts on U.S. citizens.
## Documentation Navigation
To find the HTML documentation for the features of this framework, navigate to:
docs -> build -> html -> index.html
## Dataset Used to Train the Prediction Model
Since GitHub enforces a strict 100 MB file size limit, we cannot upload the dataset to GitHub.
You need to place this CSV file manually in the data folder; it can be downloaded from this link:
https://www.kaggle.com/sogun3/uspollution
pollution_us_2000_2016.csv (382.37 MB)
## Package Name for PIP Installation
`pip install AIRPrediction`
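After installation, the framework can also be used programmatically. A minimal sketch, assuming the dataset CSV is in place; the location values are illustrative and must exist in the dataset:
```python
from AIRPrediction import validate_input, prophet

# Validate first; on success, date_string comes back in YYYY-MM-DD format.
ok, message, date_string = validate_input("NO2", "Arizona", "Maricopa", "Phoenix", "01/01/2030")
if ok:
    prediction, units = prophet("NO2", "Arizona", "Maricopa", "Phoenix", date_string)
    print(f"Prophet forecast: {prediction} {units}")
else:
    print(message)
```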
## Command to Run Sample App
`sampleapp`
## Repository Link
https://github.com/danielcasto/AIRPrediction
| AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/README.md | README.md |
import datetime
import sys
import csv
import time
from Time_Series_Models.prophet_model import prophet_prediction
from Time_Series_Models.ARIMA_model import arima_prediction
"""
.. module:: AIRPrediction
:synopsis: The driver file that is to be imported to utilize the AIRPrediction Framework.
Includes a function for input validation, prophet predictions, ARIMA predictions, and comparison of the two models.
.. moduleauthors:: Derek Pena <[email protected]>, Colin Naehr <[email protected]>, Daniel Casto <[email protected]>,
Haotian Wang <[email protected]>
"""
def validate_input(pollutant, state, county, city, date):
""" Validates the input provided by a user. To be used before any predictions are made.
:param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).
:param state: The location parameter indicating the state in the United States of America to predict for.
:param county: The location parameter indicating the county in the state to predict for.
:param city: The location parameter indicating the city in the county to predict for.
    :param date: The calendar date to predict for.
    :return: A boolean indicating whether validation was successful, a string containing any error messages,
    and a string containing the date parameter rewritten in YYYY-MM-DD format.
"""
validate = True
return_message = ""
valid_pollutants = ['NO2', 'O3', 'SO2', 'CO']
entered_datetime = ""
if pollutant == "" or state == "" or county == "" or city == "" or date == "":
return False, "Error: One or more fields left blank. Please fill out all fields.", entered_datetime
if pollutant not in valid_pollutants:
validate = False
return_message = "Error: Invalid Pollutant."
else:
if len(date) == 10:
            if date[2] != '/' or date[5] != '/':
                # Return early so a malformed date is not passed to int()/datetime below.
                return False, "Error: Invalid Date Format.", entered_datetime
month = date[:2]
day = date[3:5]
year = date[6:]
entered_datetime = datetime.datetime(int(year), int(month), int(day))
current_date = datetime.date.today().strftime('%m/%d/%Y')
current_month = current_date[:2]
current_day = current_date[3:5]
current_year = current_date[6:]
current_datetime = datetime.datetime(int(current_year), int(current_month), int(current_day))
if entered_datetime > current_datetime:
validate = True
month_string = str(entered_datetime.month)
day_string = str(entered_datetime.day)
if len(month_string) == 1:
month_string = '0' + month_string
if len(day_string) == 1:
day_string = '0' + day_string
entered_datetime = str(entered_datetime.year) + '-' + month_string + '-' + day_string
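                # predictable_areas.csv lists the (state, county, city) combinations
                # present in the dataset; the entered location must match one of them.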
with open("data/predictable_areas.csv") as file:
location_validator = csv.reader(file)
location_validation = False
state_validation = False
county_validation = False
city_validation = False
for row in location_validator:
if row[0] == state:
state_validation = True
if row[1] == county and row[2] == city:
location_validation = True
if row[1] == county:
county_validation = True
if row[2] == city:
city_validation = True
if not location_validation:
validate = False
if state_validation and county_validation and city_validation:
return_message = "Error: State, county, and city found. However, the combination of those parameters was not found in the dataset."
else:
return_message = "Error: Following location parameters not found in the dataset:"
if not state_validation:
return_message += " State,"
if not county_validation:
return_message += " County,"
if not city_validation:
return_message += " City."
if return_message[len(return_message) - 1] == ",":
return_message = return_message[0:(len(return_message) - 1)]
return_message += "."
else:
validate = False
return_message = "Error: Invalid Date. Entered date must occur after current date."
else:
validate = False
return_message = "Error: Invalid Date Format."
return validate, return_message, entered_datetime
def prophet(pollutant, state, county, city, date):
""" A function that uses the prophet_prediction from prophet_model.py to avoid using multiple import statements.
:param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).
:param state: The location parameter indicating the state in the United States of America to predict for.
:param county: The location parameter indicating the county in the state to predict for.
:param city: The location parameter indicating the city in the county to predict for.
    :param date: The calendar date to predict for.
:return: The prediction made by the prophet model given the above parameters and the units that prediction is in.
"""
return prophet_prediction(pollutant, state, county, city, date)
def arima(pollutant, state, county, city, date):
""" A function that uses the arima_prediction from ARIMA_model.py to avoid using multiple import statements.
:param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).
:param state: The location parameter indicating the state in the United States of America to predict for.
:param county: The location parameter indicating the county in the state to predict for.
:param city: The location parameter indicating the city in the county to predict for.
    :param date: The calendar date to predict for.
:return: The prediction made by the ARIMA model given the above parameters and the units that prediction is in.
"""
return arima_prediction(pollutant, state, county, city, date)
def compare_models(pollutant, state, county, city, date):
"""The driver file that is to be imported to utilize the AIRPrediction Framework.
Includes a function for input validation, prophet predictions, ARIMA predictions, and comparison of the two models.
Module Authors: Derek Pena <[email protected]>, Colin Naehr <[email protected]>, Daniel Casto <[email protected]>,
Haotian Wang <[email protected]>
compare_models is a function that times both prediction models in order to compare their speed and their output.
:param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).
:param state: The location parameter indicating the state in the United States of America to predict for.
:param county: The location parameter indicating the county in the state to predict for.
:param city: The location parameter indicating the city in the county to predict for.
    :param date: The calendar date to predict for.
:return: A list that contains the outputs for each prediction model as well as the time taken to run them.
"""
output_list = []
start_one = time.time()
prediction, units = prophet_prediction(pollutant, state, county, city, date)
end_one = time.time()
output_list.append(prediction)
output_list.append(units)
output_list.append(end_one - start_one)
start_two = time.time()
prediction, units = arima_prediction(pollutant, state, county, city, date)
end_two = time.time()
output_list.append(prediction)
output_list.append(units)
output_list.append(end_two - start_two)
    return output_list
| AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/AIRPrediction.py | AIRPrediction.py |
.. Air Quality documentation master file, created by
sphinx-quickstart on Fri Jul 30 20:08:13 2021.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Air Quality's documentation!
=======================================
**Air Quality** is a python framework that allows for users to make predictions about air quality for multiple locations, utilizing machine learning and other advanced models.
.. automodule:: AIRPrediction
:members:
.. automodule:: sampleapp
:members:
.. automodule:: ARIMA_model
:members:
.. automodule:: prophet_model
:members:
.. toctree::
:maxdepth: 2
:caption: Contents:
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/docs/source/index.rst | index.rst |
Search.setIndex({docnames:["index"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":4,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":3,"sphinx.domains.rst":2,"sphinx.domains.std":2,sphinx:56},filenames:["index.rst"],objects:{"":{AIRPrediction:[0,0,0,"-"],ARIMA_model:[0,0,0,"-"],prophet_model:[0,0,0,"-"],sampleapp:[0,0,0,"-"]},AIRPrediction:{arima:[0,1,1,""],compare_models:[0,1,1,""],prophet:[0,1,1,""],validate_input:[0,1,1,""]},ARIMA_model:{arima_prediction:[0,1,1,""]},prophet_model:{prophet_prediction:[0,1,1,""]},sampleapp:{MainWindow:[0,2,1,""],main:[0,1,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","class","Python class"]},objtypes:{"0":"py:module","1":"py:function","2":"py:class"},terms:{"boolean":0,"class":0,"function":0,"import":0,"return":0,A:0,The:0,To:0,about:0,abov:0,accept:0,advanc:0,airpredict:0,all:0,allow:0,america:0,ani:0,app:0,applic:0,ar:0,arima:0,arima_model:0,arima_predict:0,author:0,avoid:0,befor:0,both:0,calendar:0,call:0,casto:0,citi:0,cnaehr:0,co:0,colin:0,compar:0,compare_model:0,comparison:0,contain:0,counti:0,creat:0,csv:0,daniel:0,danielcasto:0,data:0,dataset:0,date:0,dd:0,demonstr:0,derek:0,determin:0,driver:0,each:0,edu:0,element:0,error:0,file:0,format:0,framework:0,from:0,gener:0,given:0,haotian:0,haotianwang:0,hold:0,host:0,includ:0,index:0,input:0,learn:0,list:0,locat:0,machin:0,made:0,main:0,mainwindow:0,make:0,messag:0,mm:0,model:0,modul:0,multipl:0,naehr:0,no2:0,none:0,note:0,o3:0,open:0,order:0,other:0,output:0,page:0,param:0,paramet:0,parent:0,part:0,pena:0,pollut:0,pollution_us_2000_2016:0,ppb:0,predict:0,prepar:0,produc:0,prophet:0,prophet_model:0,prophet_predict:0,provid:0,py:0,python:0,qwidget:0,rewrit:0,run:0,sampl:0,sampleapp:0,search:0,seri:0,so2:0,specifi:0,speed:0,state:0,statement:0,string:0,success:0,taken:0,them:0,time:0,time_series_model:0,two:0,ufl:0,ui:0,unit:0,us:0,user:0,util:0,valid:0,validate_input:0,valu:0,wa:0,wang:0,well:0,where:0,window:0,yyyi:0},titles:["Welcome to Air Quality\u2019s documentation!"],titleterms:{air:0,document:0,indic:0,qualiti:0,s:0,tabl:0,welcom:0}}) | AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/docs/build/html/searchindex.js | searchindex.js |
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define('underscore', factory) :
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, (function () {
var current = global._;
var exports = global._ = factory();
exports.noConflict = function () { global._ = current; return exports; };
}()));
}(this, (function () {
// Underscore.js 1.13.1
// https://underscorejs.org
// (c) 2009-2021 Jeremy Ashkenas, Julian Gonggrijp, and DocumentCloud and Investigative Reporters & Editors
// Underscore may be freely distributed under the MIT license.
// Current version.
var VERSION = '1.13.1';
// Establish the root object, `window` (`self`) in the browser, `global`
// on the server, or `this` in some virtual machines. We use `self`
// instead of `window` for `WebWorker` support.
var root = typeof self == 'object' && self.self === self && self ||
typeof global == 'object' && global.global === global && global ||
Function('return this')() ||
{};
// Save bytes in the minified (but not gzipped) version:
var ArrayProto = Array.prototype, ObjProto = Object.prototype;
var SymbolProto = typeof Symbol !== 'undefined' ? Symbol.prototype : null;
// Create quick reference variables for speed access to core prototypes.
var push = ArrayProto.push,
slice = ArrayProto.slice,
toString = ObjProto.toString,
hasOwnProperty = ObjProto.hasOwnProperty;
// Modern feature detection.
var supportsArrayBuffer = typeof ArrayBuffer !== 'undefined',
supportsDataView = typeof DataView !== 'undefined';
// All **ECMAScript 5+** native function implementations that we hope to use
// are declared here.
var nativeIsArray = Array.isArray,
nativeKeys = Object.keys,
nativeCreate = Object.create,
nativeIsView = supportsArrayBuffer && ArrayBuffer.isView;
// Create references to these builtin functions because we override them.
var _isNaN = isNaN,
_isFinite = isFinite;
// Keys in IE < 9 that won't be iterated by `for key in ...` and thus missed.
var hasEnumBug = !{toString: null}.propertyIsEnumerable('toString');
var nonEnumerableProps = ['valueOf', 'isPrototypeOf', 'toString',
'propertyIsEnumerable', 'hasOwnProperty', 'toLocaleString'];
// The largest integer that can be represented exactly.
var MAX_ARRAY_INDEX = Math.pow(2, 53) - 1;
// Some functions take a variable number of arguments, or a few expected
// arguments at the beginning and then a variable number of values to operate
// on. This helper accumulates all remaining arguments past the function’s
// argument length (or an explicit `startIndex`), into an array that becomes
// the last argument. Similar to ES6’s "rest parameter".
function restArguments(func, startIndex) {
startIndex = startIndex == null ? func.length - 1 : +startIndex;
return function() {
var length = Math.max(arguments.length - startIndex, 0),
rest = Array(length),
index = 0;
for (; index < length; index++) {
rest[index] = arguments[index + startIndex];
}
switch (startIndex) {
case 0: return func.call(this, rest);
case 1: return func.call(this, arguments[0], rest);
case 2: return func.call(this, arguments[0], arguments[1], rest);
}
var args = Array(startIndex + 1);
for (index = 0; index < startIndex; index++) {
args[index] = arguments[index];
}
args[startIndex] = rest;
return func.apply(this, args);
};
}
// Is a given variable an object?
function isObject(obj) {
var type = typeof obj;
return type === 'function' || type === 'object' && !!obj;
}
// Is a given value equal to null?
function isNull(obj) {
return obj === null;
}
// Is a given variable undefined?
function isUndefined(obj) {
return obj === void 0;
}
// Is a given value a boolean?
function isBoolean(obj) {
return obj === true || obj === false || toString.call(obj) === '[object Boolean]';
}
// Is a given value a DOM element?
function isElement(obj) {
return !!(obj && obj.nodeType === 1);
}
// Internal function for creating a `toString`-based type tester.
function tagTester(name) {
var tag = '[object ' + name + ']';
return function(obj) {
return toString.call(obj) === tag;
};
}
var isString = tagTester('String');
var isNumber = tagTester('Number');
var isDate = tagTester('Date');
var isRegExp = tagTester('RegExp');
var isError = tagTester('Error');
var isSymbol = tagTester('Symbol');
var isArrayBuffer = tagTester('ArrayBuffer');
var isFunction = tagTester('Function');
// Optimize `isFunction` if appropriate. Work around some `typeof` bugs in old
// v8, IE 11 (#1621), Safari 8 (#1929), and PhantomJS (#2236).
var nodelist = root.document && root.document.childNodes;
if (typeof /./ != 'function' && typeof Int8Array != 'object' && typeof nodelist != 'function') {
isFunction = function(obj) {
return typeof obj == 'function' || false;
};
}
var isFunction$1 = isFunction;
var hasObjectTag = tagTester('Object');
// In IE 10 - Edge 13, `DataView` has string tag `'[object Object]'`.
// In IE 11, the most common among them, this problem also applies to
// `Map`, `WeakMap` and `Set`.
var hasStringTagBug = (
supportsDataView && hasObjectTag(new DataView(new ArrayBuffer(8)))
),
isIE11 = (typeof Map !== 'undefined' && hasObjectTag(new Map));
var isDataView = tagTester('DataView');
// In IE 10 - Edge 13, we need a different heuristic
// to determine whether an object is a `DataView`.
function ie10IsDataView(obj) {
return obj != null && isFunction$1(obj.getInt8) && isArrayBuffer(obj.buffer);
}
var isDataView$1 = (hasStringTagBug ? ie10IsDataView : isDataView);
// Is a given value an array?
// Delegates to ECMA5's native `Array.isArray`.
var isArray = nativeIsArray || tagTester('Array');
// Internal function to check whether `key` is an own property name of `obj`.
function has$1(obj, key) {
return obj != null && hasOwnProperty.call(obj, key);
}
var isArguments = tagTester('Arguments');
// Define a fallback version of the method in browsers (ahem, IE < 9), where
// there isn't any inspectable "Arguments" type.
(function() {
if (!isArguments(arguments)) {
isArguments = function(obj) {
return has$1(obj, 'callee');
};
}
}());
var isArguments$1 = isArguments;
// Is a given object a finite number?
function isFinite$1(obj) {
return !isSymbol(obj) && _isFinite(obj) && !isNaN(parseFloat(obj));
}
// Is the given value `NaN`?
function isNaN$1(obj) {
return isNumber(obj) && _isNaN(obj);
}
// Predicate-generating function. Often useful outside of Underscore.
function constant(value) {
return function() {
return value;
};
}
// Common internal logic for `isArrayLike` and `isBufferLike`.
function createSizePropertyCheck(getSizeProperty) {
return function(collection) {
var sizeProperty = getSizeProperty(collection);
return typeof sizeProperty == 'number' && sizeProperty >= 0 && sizeProperty <= MAX_ARRAY_INDEX;
}
}
// Internal helper to generate a function to obtain property `key` from `obj`.
function shallowProperty(key) {
return function(obj) {
return obj == null ? void 0 : obj[key];
};
}
// Internal helper to obtain the `byteLength` property of an object.
var getByteLength = shallowProperty('byteLength');
// Internal helper to determine whether we should spend extensive checks against
// `ArrayBuffer` et al.
var isBufferLike = createSizePropertyCheck(getByteLength);
// Is a given value a typed array?
var typedArrayPattern = /\[object ((I|Ui)nt(8|16|32)|Float(32|64)|Uint8Clamped|Big(I|Ui)nt64)Array\]/;
function isTypedArray(obj) {
// `ArrayBuffer.isView` is the most future-proof, so use it when available.
// Otherwise, fall back on the above regular expression.
return nativeIsView ? (nativeIsView(obj) && !isDataView$1(obj)) :
isBufferLike(obj) && typedArrayPattern.test(toString.call(obj));
}
var isTypedArray$1 = supportsArrayBuffer ? isTypedArray : constant(false);
// Internal helper to obtain the `length` property of an object.
var getLength = shallowProperty('length');
// Internal helper to create a simple lookup structure.
// `collectNonEnumProps` used to depend on `_.contains`, but this led to
// circular imports. `emulatedSet` is a one-off solution that only works for
// arrays of strings.
function emulatedSet(keys) {
var hash = {};
for (var l = keys.length, i = 0; i < l; ++i) hash[keys[i]] = true;
return {
contains: function(key) { return hash[key]; },
push: function(key) {
hash[key] = true;
return keys.push(key);
}
};
}
// Internal helper. Checks `keys` for the presence of keys in IE < 9 that won't
// be iterated by `for key in ...` and thus missed. Extends `keys` in place if
// needed.
function collectNonEnumProps(obj, keys) {
keys = emulatedSet(keys);
var nonEnumIdx = nonEnumerableProps.length;
var constructor = obj.constructor;
var proto = isFunction$1(constructor) && constructor.prototype || ObjProto;
// Constructor is a special case.
var prop = 'constructor';
if (has$1(obj, prop) && !keys.contains(prop)) keys.push(prop);
while (nonEnumIdx--) {
prop = nonEnumerableProps[nonEnumIdx];
if (prop in obj && obj[prop] !== proto[prop] && !keys.contains(prop)) {
keys.push(prop);
}
}
}
// Retrieve the names of an object's own properties.
// Delegates to **ECMAScript 5**'s native `Object.keys`.
function keys(obj) {
if (!isObject(obj)) return [];
if (nativeKeys) return nativeKeys(obj);
var keys = [];
for (var key in obj) if (has$1(obj, key)) keys.push(key);
// Ahem, IE < 9.
if (hasEnumBug) collectNonEnumProps(obj, keys);
return keys;
}
// Is a given array, string, or object empty?
// An "empty" object has no enumerable own-properties.
function isEmpty(obj) {
if (obj == null) return true;
// Skip the more expensive `toString`-based type checks if `obj` has no
// `.length`.
var length = getLength(obj);
if (typeof length == 'number' && (
isArray(obj) || isString(obj) || isArguments$1(obj)
)) return length === 0;
return getLength(keys(obj)) === 0;
}
// Returns whether an object has a given set of `key:value` pairs.
function isMatch(object, attrs) {
var _keys = keys(attrs), length = _keys.length;
if (object == null) return !length;
var obj = Object(object);
for (var i = 0; i < length; i++) {
var key = _keys[i];
if (attrs[key] !== obj[key] || !(key in obj)) return false;
}
return true;
}
// If Underscore is called as a function, it returns a wrapped object that can
// be used OO-style. This wrapper holds altered versions of all functions added
// through `_.mixin`. Wrapped objects may be chained.
function _$1(obj) {
if (obj instanceof _$1) return obj;
if (!(this instanceof _$1)) return new _$1(obj);
this._wrapped = obj;
}
_$1.VERSION = VERSION;
// Extracts the result from a wrapped and chained object.
_$1.prototype.value = function() {
return this._wrapped;
};
// Provide unwrapping proxies for some methods used in engine operations
// such as arithmetic and JSON stringification.
_$1.prototype.valueOf = _$1.prototype.toJSON = _$1.prototype.value;
_$1.prototype.toString = function() {
return String(this._wrapped);
};
// Internal function to wrap or shallow-copy an ArrayBuffer,
// typed array or DataView to a new view, reusing the buffer.
function toBufferView(bufferSource) {
return new Uint8Array(
bufferSource.buffer || bufferSource,
bufferSource.byteOffset || 0,
getByteLength(bufferSource)
);
}
// We use this string twice, so give it a name for minification.
var tagDataView = '[object DataView]';
// Internal recursive comparison function for `_.isEqual`.
function eq(a, b, aStack, bStack) {
// Identical objects are equal. `0 === -0`, but they aren't identical.
// See the [Harmony `egal` proposal](https://wiki.ecmascript.org/doku.php?id=harmony:egal).
if (a === b) return a !== 0 || 1 / a === 1 / b;
// `null` or `undefined` only equal to itself (strict comparison).
if (a == null || b == null) return false;
// `NaN`s are equivalent, but non-reflexive.
if (a !== a) return b !== b;
// Exhaust primitive checks
var type = typeof a;
if (type !== 'function' && type !== 'object' && typeof b != 'object') return false;
return deepEq(a, b, aStack, bStack);
}
// Internal recursive comparison function for `_.isEqual`.
function deepEq(a, b, aStack, bStack) {
// Unwrap any wrapped objects.
if (a instanceof _$1) a = a._wrapped;
if (b instanceof _$1) b = b._wrapped;
// Compare `[[Class]]` names.
var className = toString.call(a);
if (className !== toString.call(b)) return false;
// Work around a bug in IE 10 - Edge 13.
if (hasStringTagBug && className == '[object Object]' && isDataView$1(a)) {
if (!isDataView$1(b)) return false;
className = tagDataView;
}
switch (className) {
// These types are compared by value.
case '[object RegExp]':
// RegExps are coerced to strings for comparison (Note: '' + /a/i === '/a/i')
case '[object String]':
// Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is
// equivalent to `new String("5")`.
return '' + a === '' + b;
case '[object Number]':
// `NaN`s are equivalent, but non-reflexive.
// Object(NaN) is equivalent to NaN.
if (+a !== +a) return +b !== +b;
// An `egal` comparison is performed for other numeric values.
return +a === 0 ? 1 / +a === 1 / b : +a === +b;
case '[object Date]':
case '[object Boolean]':
// Coerce dates and booleans to numeric primitive values. Dates are compared by their
// millisecond representations. Note that invalid dates with millisecond representations
// of `NaN` are not equivalent.
return +a === +b;
case '[object Symbol]':
return SymbolProto.valueOf.call(a) === SymbolProto.valueOf.call(b);
case '[object ArrayBuffer]':
case tagDataView:
// Coerce to typed array so we can fall through.
return deepEq(toBufferView(a), toBufferView(b), aStack, bStack);
}
var areArrays = className === '[object Array]';
if (!areArrays && isTypedArray$1(a)) {
var byteLength = getByteLength(a);
if (byteLength !== getByteLength(b)) return false;
if (a.buffer === b.buffer && a.byteOffset === b.byteOffset) return true;
areArrays = true;
}
if (!areArrays) {
if (typeof a != 'object' || typeof b != 'object') return false;
// Objects with different constructors are not equivalent, but `Object`s or `Array`s
// from different frames are.
var aCtor = a.constructor, bCtor = b.constructor;
if (aCtor !== bCtor && !(isFunction$1(aCtor) && aCtor instanceof aCtor &&
isFunction$1(bCtor) && bCtor instanceof bCtor)
&& ('constructor' in a && 'constructor' in b)) {
return false;
}
}
// Assume equality for cyclic structures. The algorithm for detecting cyclic
// structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`.
// Initializing stack of traversed objects.
// It's done here since we only need them for objects and arrays comparison.
aStack = aStack || [];
bStack = bStack || [];
var length = aStack.length;
while (length--) {
// Linear search. Performance is inversely proportional to the number of
// unique nested structures.
if (aStack[length] === a) return bStack[length] === b;
}
// Add the first object to the stack of traversed objects.
aStack.push(a);
bStack.push(b);
// Recursively compare objects and arrays.
if (areArrays) {
// Compare array lengths to determine if a deep comparison is necessary.
length = a.length;
if (length !== b.length) return false;
// Deep compare the contents, ignoring non-numeric properties.
while (length--) {
if (!eq(a[length], b[length], aStack, bStack)) return false;
}
} else {
// Deep compare objects.
var _keys = keys(a), key;
length = _keys.length;
// Ensure that both objects contain the same number of properties before comparing deep equality.
if (keys(b).length !== length) return false;
while (length--) {
// Deep compare each member
key = _keys[length];
if (!(has$1(b, key) && eq(a[key], b[key], aStack, bStack))) return false;
}
}
// Remove the first object from the stack of traversed objects.
aStack.pop();
bStack.pop();
return true;
}
// Perform a deep comparison to check if two objects are equal.
function isEqual(a, b) {
return eq(a, b);
}
// Retrieve all the enumerable property names of an object.
function allKeys(obj) {
if (!isObject(obj)) return [];
var keys = [];
for (var key in obj) keys.push(key);
// Ahem, IE < 9.
if (hasEnumBug) collectNonEnumProps(obj, keys);
return keys;
}
// Since the regular `Object.prototype.toString` type tests don't work for
// some types in IE 11, we use a fingerprinting heuristic instead, based
// on the methods. It's not great, but it's the best we got.
// The fingerprint method lists are defined below.
function ie11fingerprint(methods) {
var length = getLength(methods);
return function(obj) {
if (obj == null) return false;
// `Map`, `WeakMap` and `Set` have no enumerable keys.
var keys = allKeys(obj);
if (getLength(keys)) return false;
for (var i = 0; i < length; i++) {
if (!isFunction$1(obj[methods[i]])) return false;
}
// If we are testing against `WeakMap`, we need to ensure that
// `obj` doesn't have a `forEach` method in order to distinguish
// it from a regular `Map`.
return methods !== weakMapMethods || !isFunction$1(obj[forEachName]);
};
}
// In the interest of compact minification, we write
// each string in the fingerprints only once.
var forEachName = 'forEach',
hasName = 'has',
commonInit = ['clear', 'delete'],
mapTail = ['get', hasName, 'set'];
// `Map`, `WeakMap` and `Set` each have slightly different
// combinations of the above sublists.
var mapMethods = commonInit.concat(forEachName, mapTail),
weakMapMethods = commonInit.concat(mapTail),
setMethods = ['add'].concat(commonInit, forEachName, hasName);
var isMap = isIE11 ? ie11fingerprint(mapMethods) : tagTester('Map');
var isWeakMap = isIE11 ? ie11fingerprint(weakMapMethods) : tagTester('WeakMap');
var isSet = isIE11 ? ie11fingerprint(setMethods) : tagTester('Set');
var isWeakSet = tagTester('WeakSet');
// Retrieve the values of an object's properties.
function values(obj) {
var _keys = keys(obj);
var length = _keys.length;
var values = Array(length);
for (var i = 0; i < length; i++) {
values[i] = obj[_keys[i]];
}
return values;
}
// Convert an object into a list of `[key, value]` pairs.
// The opposite of `_.object` with one argument.
function pairs(obj) {
var _keys = keys(obj);
var length = _keys.length;
var pairs = Array(length);
for (var i = 0; i < length; i++) {
pairs[i] = [_keys[i], obj[_keys[i]]];
}
return pairs;
}
// Invert the keys and values of an object. The values must be serializable.
function invert(obj) {
var result = {};
var _keys = keys(obj);
for (var i = 0, length = _keys.length; i < length; i++) {
result[obj[_keys[i]]] = _keys[i];
}
return result;
}
// Return a sorted list of the function names available on the object.
function functions(obj) {
var names = [];
for (var key in obj) {
if (isFunction$1(obj[key])) names.push(key);
}
return names.sort();
}
// An internal function for creating assigner functions.
function createAssigner(keysFunc, defaults) {
return function(obj) {
var length = arguments.length;
if (defaults) obj = Object(obj);
if (length < 2 || obj == null) return obj;
for (var index = 1; index < length; index++) {
var source = arguments[index],
keys = keysFunc(source),
l = keys.length;
for (var i = 0; i < l; i++) {
var key = keys[i];
if (!defaults || obj[key] === void 0) obj[key] = source[key];
}
}
return obj;
};
}
// Extend a given object with all the properties in passed-in object(s).
var extend = createAssigner(allKeys);
// Assigns a given object with all the own properties in the passed-in
// object(s).
// (https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object/assign)
var extendOwn = createAssigner(keys);
// Fill in a given object with default properties.
var defaults = createAssigner(allKeys, true);
// Create a naked function reference for surrogate-prototype-swapping.
function ctor() {
return function(){};
}
// An internal function for creating a new object that inherits from another.
function baseCreate(prototype) {
if (!isObject(prototype)) return {};
if (nativeCreate) return nativeCreate(prototype);
var Ctor = ctor();
Ctor.prototype = prototype;
var result = new Ctor;
Ctor.prototype = null;
return result;
}
// Creates an object that inherits from the given prototype object.
// If additional properties are provided then they will be added to the
// created object.
function create(prototype, props) {
var result = baseCreate(prototype);
if (props) extendOwn(result, props);
return result;
}
// Create a (shallow-cloned) duplicate of an object.
function clone(obj) {
if (!isObject(obj)) return obj;
return isArray(obj) ? obj.slice() : extend({}, obj);
}
// Invokes `interceptor` with the `obj` and then returns `obj`.
// The primary purpose of this method is to "tap into" a method chain, in
// order to perform operations on intermediate results within the chain.
function tap(obj, interceptor) {
interceptor(obj);
return obj;
}
// Normalize a (deep) property `path` to array.
// Like `_.iteratee`, this function can be customized.
function toPath$1(path) {
return isArray(path) ? path : [path];
}
_$1.toPath = toPath$1;
// Internal wrapper for `_.toPath` to enable minification.
// Similar to `cb` for `_.iteratee`.
function toPath(path) {
return _$1.toPath(path);
}
// Internal function to obtain a nested property in `obj` along `path`.
function deepGet(obj, path) {
var length = path.length;
for (var i = 0; i < length; i++) {
if (obj == null) return void 0;
obj = obj[path[i]];
}
return length ? obj : void 0;
}
// Get the value of the (deep) property on `path` from `object`.
// If any property in `path` does not exist or if the value is
// `undefined`, return `defaultValue` instead.
// The `path` is normalized through `_.toPath`.
function get(object, path, defaultValue) {
var value = deepGet(object, toPath(path));
return isUndefined(value) ? defaultValue : value;
}
// Shortcut function for checking if an object has a given property directly on
// itself (in other words, not on a prototype). Unlike the internal `has`
// function, this public version can also traverse nested properties.
function has(obj, path) {
path = toPath(path);
var length = path.length;
for (var i = 0; i < length; i++) {
var key = path[i];
if (!has$1(obj, key)) return false;
obj = obj[key];
}
return !!length;
}
// Keep the identity function around for default iteratees.
function identity(value) {
return value;
}
// Returns a predicate for checking whether an object has a given set of
// `key:value` pairs.
function matcher(attrs) {
attrs = extendOwn({}, attrs);
return function(obj) {
return isMatch(obj, attrs);
};
}
// Creates a function that, when passed an object, will traverse that object’s
// properties down the given `path`, specified as an array of keys or indices.
function property(path) {
path = toPath(path);
return function(obj) {
return deepGet(obj, path);
};
}
// Internal function that returns an efficient (for current engines) version
// of the passed-in callback, to be repeatedly applied in other Underscore
// functions.
function optimizeCb(func, context, argCount) {
if (context === void 0) return func;
switch (argCount == null ? 3 : argCount) {
case 1: return function(value) {
return func.call(context, value);
};
// The 2-argument case is omitted because we’re not using it.
case 3: return function(value, index, collection) {
return func.call(context, value, index, collection);
};
case 4: return function(accumulator, value, index, collection) {
return func.call(context, accumulator, value, index, collection);
};
}
return function() {
return func.apply(context, arguments);
};
}
// An internal function to generate callbacks that can be applied to each
// element in a collection, returning the desired result — either `_.identity`,
// an arbitrary callback, a property matcher, or a property accessor.
function baseIteratee(value, context, argCount) {
if (value == null) return identity;
if (isFunction$1(value)) return optimizeCb(value, context, argCount);
if (isObject(value) && !isArray(value)) return matcher(value);
return property(value);
}
// External wrapper for our callback generator. Users may customize
// `_.iteratee` if they want additional predicate/iteratee shorthand styles.
// This abstraction hides the internal-only `argCount` argument.
function iteratee(value, context) {
return baseIteratee(value, context, Infinity);
}
_$1.iteratee = iteratee;
// The function we call internally to generate a callback. It invokes
// `_.iteratee` if overridden, otherwise `baseIteratee`.
function cb(value, context, argCount) {
if (_$1.iteratee !== iteratee) return _$1.iteratee(value, context);
return baseIteratee(value, context, argCount);
}
// Returns the results of applying the `iteratee` to each element of `obj`.
// In contrast to `_.map` it returns an object.
function mapObject(obj, iteratee, context) {
iteratee = cb(iteratee, context);
var _keys = keys(obj),
length = _keys.length,
results = {};
for (var index = 0; index < length; index++) {
var currentKey = _keys[index];
results[currentKey] = iteratee(obj[currentKey], currentKey, obj);
}
return results;
}
// Predicate-generating function. Often useful outside of Underscore.
function noop(){}
// Generates a function for a given object that returns a given property.
function propertyOf(obj) {
if (obj == null) return noop;
return function(path) {
return get(obj, path);
};
}
// Run a function **n** times.
function times(n, iteratee, context) {
var accum = Array(Math.max(0, n));
iteratee = optimizeCb(iteratee, context, 1);
for (var i = 0; i < n; i++) accum[i] = iteratee(i);
return accum;
}
// Return a random integer between `min` and `max` (inclusive).
function random(min, max) {
if (max == null) {
max = min;
min = 0;
}
return min + Math.floor(Math.random() * (max - min + 1));
}
// A (possibly faster) way to get the current timestamp as an integer.
var now = Date.now || function() {
return new Date().getTime();
};
// Internal helper to generate functions for escaping and unescaping strings
// to/from HTML interpolation.
function createEscaper(map) {
var escaper = function(match) {
return map[match];
};
// Regexes for identifying a key that needs to be escaped.
var source = '(?:' + keys(map).join('|') + ')';
var testRegexp = RegExp(source);
var replaceRegexp = RegExp(source, 'g');
return function(string) {
string = string == null ? '' : '' + string;
return testRegexp.test(string) ? string.replace(replaceRegexp, escaper) : string;
};
}
// Internal list of HTML entities for escaping.
var escapeMap = {
  '&': '&amp;',
  '<': '&lt;',
  '>': '&gt;',
  '"': '&quot;',
  "'": '&#x27;',
  '`': '&#x60;'
};
// Function for escaping strings to HTML interpolation.
var _escape = createEscaper(escapeMap);
// Internal list of HTML entities for unescaping.
var unescapeMap = invert(escapeMap);
// Function for unescaping strings from HTML interpolation.
var _unescape = createEscaper(unescapeMap);
// By default, Underscore uses ERB-style template delimiters. Change the
// following template settings to use alternative delimiters.
var templateSettings = _$1.templateSettings = {
evaluate: /<%([\s\S]+?)%>/g,
interpolate: /<%=([\s\S]+?)%>/g,
escape: /<%-([\s\S]+?)%>/g
};
// When customizing `_.templateSettings`, if you don't want to define an
// interpolation, evaluation or escaping regex, we need one that is
// guaranteed not to match.
var noMatch = /(.)^/;
// Certain characters need to be escaped so that they can be put into a
// string literal.
var escapes = {
"'": "'",
'\\': '\\',
'\r': 'r',
'\n': 'n',
'\u2028': 'u2028',
'\u2029': 'u2029'
};
var escapeRegExp = /\\|'|\r|\n|\u2028|\u2029/g;
function escapeChar(match) {
return '\\' + escapes[match];
}
// In order to prevent third-party code injection through
// `_.templateSettings.variable`, we test it against the following regular
// expression. It is intentionally a bit more liberal than just matching valid
// identifiers, but still prevents possible loopholes through defaults or
// destructuring assignment.
var bareIdentifier = /^\s*(\w|\$)+\s*$/;
// JavaScript micro-templating, similar to John Resig's implementation.
// Underscore templating handles arbitrary delimiters, preserves whitespace,
// and correctly escapes quotes within interpolated code.
// NB: `oldSettings` only exists for backwards compatibility.
function template(text, settings, oldSettings) {
if (!settings && oldSettings) settings = oldSettings;
settings = defaults({}, settings, _$1.templateSettings);
// Combine delimiters into one regular expression via alternation.
var matcher = RegExp([
(settings.escape || noMatch).source,
(settings.interpolate || noMatch).source,
(settings.evaluate || noMatch).source
].join('|') + '|$', 'g');
// Compile the template source, escaping string literals appropriately.
var index = 0;
var source = "__p+='";
text.replace(matcher, function(match, escape, interpolate, evaluate, offset) {
source += text.slice(index, offset).replace(escapeRegExp, escapeChar);
index = offset + match.length;
if (escape) {
source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'";
} else if (interpolate) {
source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'";
} else if (evaluate) {
source += "';\n" + evaluate + "\n__p+='";
}
// Adobe VMs need the match returned to produce the correct offset.
return match;
});
source += "';\n";
var argument = settings.variable;
if (argument) {
// Insure against third-party code injection. (CVE-2021-23358)
if (!bareIdentifier.test(argument)) throw new Error(
'variable is not a bare identifier: ' + argument
);
} else {
// If a variable is not specified, place data values in local scope.
source = 'with(obj||{}){\n' + source + '}\n';
argument = 'obj';
}
source = "var __t,__p='',__j=Array.prototype.join," +
"print=function(){__p+=__j.call(arguments,'');};\n" +
source + 'return __p;\n';
var render;
try {
render = new Function(argument, '_', source);
} catch (e) {
e.source = source;
throw e;
}
var template = function(data) {
return render.call(this, data, _$1);
};
// Provide the compiled source as a convenience for precompilation.
template.source = 'function(' + argument + '){\n' + source + '}';
return template;
}
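// Usage sketch (illustrative):
//   var hello = _.template("Hello <%= name %>!");
//   hello({name: "Ada"});                        // => "Hello Ada!"
//   var safe = _.template("<b><%- value %></b>");
//   safe({value: "<script>"});                   // => "<b>&lt;script&gt;</b>"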
// Traverses the children of `obj` along `path`. If a child is a function, it
// is invoked with its parent as context. Returns the value of the final
// child, or `fallback` if any child is undefined.
function result(obj, path, fallback) {
path = toPath(path);
var length = path.length;
if (!length) {
return isFunction$1(fallback) ? fallback.call(obj) : fallback;
}
for (var i = 0; i < length; i++) {
var prop = obj == null ? void 0 : obj[path[i]];
if (prop === void 0) {
prop = fallback;
i = length; // Ensure we don't continue iterating.
}
obj = isFunction$1(prop) ? prop.call(obj) : prop;
}
return obj;
}
// Generate a unique integer id (unique within the entire client session).
// Useful for temporary DOM ids.
var idCounter = 0;
function uniqueId(prefix) {
var id = ++idCounter + '';
return prefix ? prefix + id : id;
}
// Start chaining a wrapped Underscore object.
function chain(obj) {
var instance = _$1(obj);
instance._chain = true;
return instance;
}
// Internal function to execute `sourceFunc` bound to `context` with optional
// `args`. Determines whether to execute a function as a constructor or as a
// normal function.
function executeBound(sourceFunc, boundFunc, context, callingContext, args) {
if (!(callingContext instanceof boundFunc)) return sourceFunc.apply(context, args);
var self = baseCreate(sourceFunc.prototype);
var result = sourceFunc.apply(self, args);
if (isObject(result)) return result;
return self;
}
// Partially apply a function by creating a version that has had some of its
// arguments pre-filled, without changing its dynamic `this` context. `_` acts
// as a placeholder by default, allowing any combination of arguments to be
// pre-filled. Set `_.partial.placeholder` for a custom placeholder argument.
var partial = restArguments(function(func, boundArgs) {
var placeholder = partial.placeholder;
var bound = function() {
var position = 0, length = boundArgs.length;
var args = Array(length);
for (var i = 0; i < length; i++) {
args[i] = boundArgs[i] === placeholder ? arguments[position++] : boundArgs[i];
}
while (position < arguments.length) args.push(arguments[position++]);
return executeBound(func, bound, this, this, args);
};
return bound;
});
partial.placeholder = _$1;
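// Usage sketch (illustrative):
//   var subtract = function(a, b) { return b - a; };
//   var sub5 = _.partial(subtract, 5);          // pre-fills `a`
//   sub5(20);                                   // => 15
//   var subFrom20 = _.partial(subtract, _, 20); // `_` marks a placeholder for `a`
//   subFrom20(5);                               // => 15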
// Create a function bound to a given object (assigning `this`, and arguments,
// optionally).
var bind = restArguments(function(func, context, args) {
if (!isFunction$1(func)) throw new TypeError('Bind must be called on a function');
var bound = restArguments(function(callArgs) {
return executeBound(func, bound, context, this, args.concat(callArgs));
});
return bound;
});
// Internal helper for collection methods to determine whether a collection
// should be iterated as an array or as an object.
// Related: https://people.mozilla.org/~jorendorff/es6-draft.html#sec-tolength
// Avoids a very nasty iOS 8 JIT bug on ARM-64. #2094
var isArrayLike = createSizePropertyCheck(getLength);
// Internal implementation of a recursive `flatten` function.
function flatten$1(input, depth, strict, output) {
output = output || [];
if (!depth && depth !== 0) {
depth = Infinity;
} else if (depth <= 0) {
return output.concat(input);
}
var idx = output.length;
for (var i = 0, length = getLength(input); i < length; i++) {
var value = input[i];
if (isArrayLike(value) && (isArray(value) || isArguments$1(value))) {
// Flatten current level of array or arguments object.
if (depth > 1) {
flatten$1(value, depth - 1, strict, output);
idx = output.length;
} else {
var j = 0, len = value.length;
while (j < len) output[idx++] = value[j++];
}
} else if (!strict) {
output[idx++] = value;
}
}
return output;
}
// Bind a number of an object's methods to that object. Remaining arguments
// are the method names to be bound. Useful for ensuring that all callbacks
// defined on an object belong to it.
var bindAll = restArguments(function(obj, keys) {
keys = flatten$1(keys, false, false);
var index = keys.length;
if (index < 1) throw new Error('bindAll must be passed function names');
while (index--) {
var key = keys[index];
obj[key] = bind(obj[key], obj);
}
return obj;
});
// Memoize an expensive function by storing its results.
function memoize(func, hasher) {
var memoize = function(key) {
var cache = memoize.cache;
var address = '' + (hasher ? hasher.apply(this, arguments) : key);
if (!has$1(cache, address)) cache[address] = func.apply(this, arguments);
return cache[address];
};
memoize.cache = {};
return memoize;
}
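// Usage sketch (illustrative):
//   var fibonacci = _.memoize(function(n) {
//     return n < 2 ? n : fibonacci(n - 1) + fibonacci(n - 2);
//   });
//   fibonacci(30); // each distinct argument is computed once, then served from cache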
// Delays a function for the given number of milliseconds, and then calls
// it with the arguments supplied.
var delay = restArguments(function(func, wait, args) {
return setTimeout(function() {
return func.apply(null, args);
}, wait);
});
// Defers a function, scheduling it to run after the current call stack has
// cleared.
var defer = partial(delay, _$1, 1);
// Returns a function, that, when invoked, will only be triggered at most once
// during a given window of time. Normally, the throttled function will run
// as much as it can, without ever going more than once per `wait` duration;
// but if you'd like to disable the execution on the leading edge, pass
// `{leading: false}`. To disable execution on the trailing edge, ditto.
function throttle(func, wait, options) {
var timeout, context, args, result;
var previous = 0;
if (!options) options = {};
var later = function() {
previous = options.leading === false ? 0 : now();
timeout = null;
result = func.apply(context, args);
if (!timeout) context = args = null;
};
var throttled = function() {
var _now = now();
if (!previous && options.leading === false) previous = _now;
var remaining = wait - (_now - previous);
context = this;
args = arguments;
if (remaining <= 0 || remaining > wait) {
if (timeout) {
clearTimeout(timeout);
timeout = null;
}
previous = _now;
result = func.apply(context, args);
if (!timeout) context = args = null;
} else if (!timeout && options.trailing !== false) {
timeout = setTimeout(later, remaining);
}
return result;
};
throttled.cancel = function() {
clearTimeout(timeout);
previous = 0;
timeout = context = args = null;
};
return throttled;
}
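// Usage sketch (illustrative; `updatePosition` and the jQuery binding are
// placeholder names):
//   var throttled = _.throttle(updatePosition, 100);
//   $(window).scroll(throttled); // `updatePosition` runs at most once per 100 ms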
// When a sequence of calls of the returned function ends, the argument
// function is triggered. The end of a sequence is defined by the `wait`
// parameter. If `immediate` is passed, the argument function will be
// triggered at the beginning of the sequence instead of at the end.
function debounce(func, wait, immediate) {
var timeout, previous, args, result, context;
var later = function() {
var passed = now() - previous;
if (wait > passed) {
timeout = setTimeout(later, wait - passed);
} else {
timeout = null;
if (!immediate) result = func.apply(context, args);
// This check is needed because `func` can recursively invoke `debounced`.
if (!timeout) args = context = null;
}
};
var debounced = restArguments(function(_args) {
context = this;
args = _args;
previous = now();
if (!timeout) {
timeout = setTimeout(later, wait);
if (immediate) result = func.apply(context, args);
}
return result;
});
debounced.cancel = function() {
clearTimeout(timeout);
timeout = args = context = null;
};
return debounced;
}
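// Usage sketch (illustrative; `calculateLayout` is a placeholder name):
//   var lazyLayout = _.debounce(calculateLayout, 300);
//   $(window).resize(lazyLayout); // runs only after resizing has been idle for 300 ms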
// Returns the first function passed as an argument to the second,
// allowing you to adjust arguments, run code before and after, and
// conditionally execute the original function.
function wrap(func, wrapper) {
return partial(wrapper, func);
}
// Returns a negated version of the passed-in predicate.
function negate(predicate) {
return function() {
return !predicate.apply(this, arguments);
};
}
// Returns a function that is the composition of a list of functions, each
// consuming the return value of the function that follows.
function compose() {
var args = arguments;
var start = args.length - 1;
return function() {
var i = start;
var result = args[start].apply(this, arguments);
while (i--) result = args[i].call(this, result);
return result;
};
}
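// Usage sketch (illustrative):
//   var greet = function(name) { return "hi: " + name; };
//   var exclaim = function(statement) { return statement.toUpperCase() + "!"; };
//   var welcome = _.compose(greet, exclaim); // exclaim runs first, then greet
//   welcome("moe"); // => "hi: MOE!"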
// Returns a function that will only be executed on and after the Nth call.
function after(times, func) {
return function() {
if (--times < 1) {
return func.apply(this, arguments);
}
};
}
// Returns a function that will only be executed up to (but not including) the
// Nth call.
function before(times, func) {
var memo;
return function() {
if (--times > 0) {
memo = func.apply(this, arguments);
}
if (times <= 1) func = null;
return memo;
};
}
// Returns a function that will be executed at most one time, no matter how
// often you call it. Useful for lazy initialization.
var once = partial(before, 2);
// Returns the first key on an object that passes a truth test.
function findKey(obj, predicate, context) {
predicate = cb(predicate, context);
var _keys = keys(obj), key;
for (var i = 0, length = _keys.length; i < length; i++) {
key = _keys[i];
if (predicate(obj[key], key, obj)) return key;
}
}
// Internal function to generate `_.findIndex` and `_.findLastIndex`.
function createPredicateIndexFinder(dir) {
return function(array, predicate, context) {
predicate = cb(predicate, context);
var length = getLength(array);
var index = dir > 0 ? 0 : length - 1;
for (; index >= 0 && index < length; index += dir) {
if (predicate(array[index], index, array)) return index;
}
return -1;
};
}
// Returns the first index on an array-like that passes a truth test.
var findIndex = createPredicateIndexFinder(1);
// Returns the last index on an array-like that passes a truth test.
var findLastIndex = createPredicateIndexFinder(-1);
// Use a comparator function to figure out the smallest index at which
// an object should be inserted so as to maintain order. Uses binary search.
function sortedIndex(array, obj, iteratee, context) {
iteratee = cb(iteratee, context, 1);
var value = iteratee(obj);
var low = 0, high = getLength(array);
while (low < high) {
var mid = Math.floor((low + high) / 2);
if (iteratee(array[mid]) < value) low = mid + 1; else high = mid;
}
return low;
}
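// Usage sketch (illustrative):
//   _.sortedIndex([10, 20, 30, 40, 50], 35);                  // => 3
//   _.sortedIndex([{age: 40}, {age: 60}], {age: 50}, 'age');  // => 1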
// Internal function to generate the `_.indexOf` and `_.lastIndexOf` functions.
function createIndexFinder(dir, predicateFind, sortedIndex) {
return function(array, item, idx) {
var i = 0, length = getLength(array);
if (typeof idx == 'number') {
if (dir > 0) {
i = idx >= 0 ? idx : Math.max(idx + length, i);
} else {
length = idx >= 0 ? Math.min(idx + 1, length) : idx + length + 1;
}
} else if (sortedIndex && idx && length) {
idx = sortedIndex(array, item);
return array[idx] === item ? idx : -1;
}
if (item !== item) {
idx = predicateFind(slice.call(array, i, length), isNaN$1);
return idx >= 0 ? idx + i : -1;
}
for (idx = dir > 0 ? i : length - 1; idx >= 0 && idx < length; idx += dir) {
if (array[idx] === item) return idx;
}
return -1;
};
}
// Return the position of the first occurrence of an item in an array,
// or -1 if the item is not included in the array.
// If the array is large and already in sort order, pass `true`
// for **isSorted** to use binary search.
var indexOf = createIndexFinder(1, findIndex, sortedIndex);
// Return the position of the last occurrence of an item in an array,
// or -1 if the item is not included in the array.
var lastIndexOf = createIndexFinder(-1, findLastIndex);
// Return the first value which passes a truth test.
function find(obj, predicate, context) {
var keyFinder = isArrayLike(obj) ? findIndex : findKey;
var key = keyFinder(obj, predicate, context);
if (key !== void 0 && key !== -1) return obj[key];
}
// Convenience version of a common use case of `_.find`: getting the first
// object containing specific `key:value` pairs.
function findWhere(obj, attrs) {
return find(obj, matcher(attrs));
}
// The cornerstone for collection functions, an `each`
// implementation, aka `forEach`.
// Handles raw objects in addition to array-likes. Treats all
// sparse array-likes as if they were dense.
function each(obj, iteratee, context) {
iteratee = optimizeCb(iteratee, context);
var i, length;
if (isArrayLike(obj)) {
for (i = 0, length = obj.length; i < length; i++) {
iteratee(obj[i], i, obj);
}
} else {
var _keys = keys(obj);
for (i = 0, length = _keys.length; i < length; i++) {
iteratee(obj[_keys[i]], _keys[i], obj);
}
}
return obj;
}
// Return the results of applying the iteratee to each element.
function map(obj, iteratee, context) {
iteratee = cb(iteratee, context);
var _keys = !isArrayLike(obj) && keys(obj),
length = (_keys || obj).length,
results = Array(length);
for (var index = 0; index < length; index++) {
var currentKey = _keys ? _keys[index] : index;
results[index] = iteratee(obj[currentKey], currentKey, obj);
}
return results;
}
// Internal helper to create a reducing function, iterating left or right.
function createReduce(dir) {
// Wrap code that reassigns argument variables in a separate function than
// the one that accesses `arguments.length` to avoid a perf hit. (#1991)
var reducer = function(obj, iteratee, memo, initial) {
var _keys = !isArrayLike(obj) && keys(obj),
length = (_keys || obj).length,
index = dir > 0 ? 0 : length - 1;
if (!initial) {
memo = obj[_keys ? _keys[index] : index];
index += dir;
}
for (; index >= 0 && index < length; index += dir) {
var currentKey = _keys ? _keys[index] : index;
memo = iteratee(memo, obj[currentKey], currentKey, obj);
}
return memo;
};
return function(obj, iteratee, memo, context) {
var initial = arguments.length >= 3;
return reducer(obj, optimizeCb(iteratee, context, 4), memo, initial);
};
}
// **Reduce** builds up a single result from a list of values, aka `inject`,
// or `foldl`.
var reduce = createReduce(1);
// The right-associative version of reduce, also known as `foldr`.
var reduceRight = createReduce(-1);
// Return all the elements that pass a truth test.
function filter(obj, predicate, context) {
var results = [];
predicate = cb(predicate, context);
each(obj, function(value, index, list) {
if (predicate(value, index, list)) results.push(value);
});
return results;
}
// Return all the elements for which a truth test fails.
function reject(obj, predicate, context) {
return filter(obj, negate(cb(predicate)), context);
}
// Determine whether all of the elements pass a truth test.
function every(obj, predicate, context) {
predicate = cb(predicate, context);
var _keys = !isArrayLike(obj) && keys(obj),
length = (_keys || obj).length;
for (var index = 0; index < length; index++) {
var currentKey = _keys ? _keys[index] : index;
if (!predicate(obj[currentKey], currentKey, obj)) return false;
}
return true;
}
// Determine if at least one element in the object passes a truth test.
function some(obj, predicate, context) {
predicate = cb(predicate, context);
var _keys = !isArrayLike(obj) && keys(obj),
length = (_keys || obj).length;
for (var index = 0; index < length; index++) {
var currentKey = _keys ? _keys[index] : index;
if (predicate(obj[currentKey], currentKey, obj)) return true;
}
return false;
}
// Determine if the array or object contains a given item (using `===`).
function contains(obj, item, fromIndex, guard) {
if (!isArrayLike(obj)) obj = values(obj);
if (typeof fromIndex != 'number' || guard) fromIndex = 0;
return indexOf(obj, item, fromIndex) >= 0;
}
// Invoke a method (with arguments) on every item in a collection.
var invoke = restArguments(function(obj, path, args) {
var contextPath, func;
if (isFunction$1(path)) {
func = path;
} else {
path = toPath(path);
contextPath = path.slice(0, -1);
path = path[path.length - 1];
}
return map(obj, function(context) {
var method = func;
if (!method) {
if (contextPath && contextPath.length) {
context = deepGet(context, contextPath);
}
if (context == null) return void 0;
method = context[path];
}
return method == null ? method : method.apply(context, args);
});
});
// Convenience version of a common use case of `_.map`: fetching a property.
function pluck(obj, key) {
return map(obj, property(key));
}
// Convenience version of a common use case of `_.filter`: selecting only
// objects containing specific `key:value` pairs.
function where(obj, attrs) {
return filter(obj, matcher(attrs));
}
// Return the maximum element (or element-based computation).
function max(obj, iteratee, context) {
var result = -Infinity, lastComputed = -Infinity,
value, computed;
if (iteratee == null || typeof iteratee == 'number' && typeof obj[0] != 'object' && obj != null) {
obj = isArrayLike(obj) ? obj : values(obj);
for (var i = 0, length = obj.length; i < length; i++) {
value = obj[i];
if (value != null && value > result) {
result = value;
}
}
} else {
iteratee = cb(iteratee, context);
each(obj, function(v, index, list) {
computed = iteratee(v, index, list);
if (computed > lastComputed || computed === -Infinity && result === -Infinity) {
result = v;
lastComputed = computed;
}
});
}
return result;
}
// Return the minimum element (or element-based computation).
function min(obj, iteratee, context) {
var result = Infinity, lastComputed = Infinity,
value, computed;
if (iteratee == null || typeof iteratee == 'number' && typeof obj[0] != 'object' && obj != null) {
obj = isArrayLike(obj) ? obj : values(obj);
for (var i = 0, length = obj.length; i < length; i++) {
value = obj[i];
if (value != null && value < result) {
result = value;
}
}
} else {
iteratee = cb(iteratee, context);
each(obj, function(v, index, list) {
computed = iteratee(v, index, list);
if (computed < lastComputed || computed === Infinity && result === Infinity) {
result = v;
lastComputed = computed;
}
});
}
return result;
}
// Sample **n** random values from a collection using the modern version of the
// [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher–Yates_shuffle).
// If **n** is not specified, returns a single random element.
// The internal `guard` argument allows it to work with `_.map`.
function sample(obj, n, guard) {
if (n == null || guard) {
if (!isArrayLike(obj)) obj = values(obj);
return obj[random(obj.length - 1)];
}
var sample = isArrayLike(obj) ? clone(obj) : values(obj);
var length = getLength(sample);
n = Math.max(Math.min(n, length), 0);
var last = length - 1;
for (var index = 0; index < n; index++) {
var rand = random(index, last);
var temp = sample[index];
sample[index] = sample[rand];
sample[rand] = temp;
}
return sample.slice(0, n);
}
// Shuffle a collection.
function shuffle(obj) {
return sample(obj, Infinity);
}
// Sort the object's values by a criterion produced by an iteratee.
function sortBy(obj, iteratee, context) {
var index = 0;
iteratee = cb(iteratee, context);
return pluck(map(obj, function(value, key, list) {
return {
value: value,
index: index++,
criteria: iteratee(value, key, list)
};
}).sort(function(left, right) {
var a = left.criteria;
var b = right.criteria;
if (a !== b) {
if (a > b || a === void 0) return 1;
if (a < b || b === void 0) return -1;
}
return left.index - right.index;
}), 'value');
}
// An internal function used for aggregate "group by" operations.
function group(behavior, partition) {
return function(obj, iteratee, context) {
var result = partition ? [[], []] : {};
iteratee = cb(iteratee, context);
each(obj, function(value, index) {
var key = iteratee(value, index, obj);
behavior(result, value, key);
});
return result;
};
}
// Groups the object's values by a criterion. Pass either a string attribute
// to group by, or a function that returns the criterion.
var groupBy = group(function(result, value, key) {
if (has$1(result, key)) result[key].push(value); else result[key] = [value];
});
// Indexes the object's values by a criterion, similar to `_.groupBy`, but for
// when you know that your index values will be unique.
var indexBy = group(function(result, value, key) {
result[key] = value;
});
// Counts instances of an object that group by a certain criterion. Pass
// either a string attribute to count by, or a function that returns the
// criterion.
var countBy = group(function(result, value, key) {
if (has$1(result, key)) result[key]++; else result[key] = 1;
});
// Split a collection into two arrays: one whose elements all pass the given
// truth test, and one whose elements all do not pass the truth test.
var partition = group(function(result, value, pass) {
result[pass ? 0 : 1].push(value);
}, true);
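// Usage sketch (illustrative):
//   _.groupBy([1.3, 2.1, 2.4], Math.floor);       // => {1: [1.3], 2: [2.1, 2.4]}
//   _.countBy(['one', 'two', 'three'], 'length'); // => {3: 2, 5: 1}
//   _.partition([0, 1, 2, 3, 4, 5], function(n) { return n % 2 !== 0; });
//   // => [[1, 3, 5], [0, 2, 4]]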
// Safely create a real, live array from anything iterable.
var reStrSymbol = /[^\ud800-\udfff]|[\ud800-\udbff][\udc00-\udfff]|[\ud800-\udfff]/g;
function toArray(obj) {
if (!obj) return [];
if (isArray(obj)) return slice.call(obj);
if (isString(obj)) {
// Keep surrogate pair characters together.
return obj.match(reStrSymbol);
}
if (isArrayLike(obj)) return map(obj, identity);
return values(obj);
}
// Return the number of elements in a collection.
function size(obj) {
if (obj == null) return 0;
return isArrayLike(obj) ? obj.length : keys(obj).length;
}
// Internal `_.pick` helper function to determine whether `key` is an enumerable
// property name of `obj`.
function keyInObj(value, key, obj) {
return key in obj;
}
// Return a copy of the object only containing the allowed properties.
var pick = restArguments(function(obj, keys) {
var result = {}, iteratee = keys[0];
if (obj == null) return result;
if (isFunction$1(iteratee)) {
if (keys.length > 1) iteratee = optimizeCb(iteratee, keys[1]);
keys = allKeys(obj);
} else {
iteratee = keyInObj;
keys = flatten$1(keys, false, false);
obj = Object(obj);
}
for (var i = 0, length = keys.length; i < length; i++) {
var key = keys[i];
var value = obj[key];
if (iteratee(value, key, obj)) result[key] = value;
}
return result;
});
// Return a copy of the object without the disallowed properties.
var omit = restArguments(function(obj, keys) {
var iteratee = keys[0], context;
if (isFunction$1(iteratee)) {
iteratee = negate(iteratee);
if (keys.length > 1) context = keys[1];
} else {
keys = map(flatten$1(keys, false, false), String);
iteratee = function(value, key) {
return !contains(keys, key);
};
}
return pick(obj, iteratee, context);
});
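// Usage sketch (illustrative):
//   var moe = {name: 'moe', age: 50, userid: 'moe1'};
//   _.pick(moe, 'name', 'age');                                  // => {name: 'moe', age: 50}
//   _.pick(moe, function(value) { return _.isNumber(value); });  // => {age: 50}
//   _.omit(moe, 'userid');                                       // => {name: 'moe', age: 50}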
// Returns everything but the last entry of the array. Especially useful on
// the arguments object. Passing **n** will return all the values in
// the array, excluding the last N.
function initial(array, n, guard) {
return slice.call(array, 0, Math.max(0, array.length - (n == null || guard ? 1 : n)));
}
// Get the first element of an array. Passing **n** will return the first N
// values in the array. The **guard** check allows it to work with `_.map`.
function first(array, n, guard) {
if (array == null || array.length < 1) return n == null || guard ? void 0 : [];
if (n == null || guard) return array[0];
return initial(array, array.length - n);
}
// Returns everything but the first entry of the `array`. Especially useful on
// the `arguments` object. Passing an **n** will return the rest N values in the
// `array`.
function rest(array, n, guard) {
return slice.call(array, n == null || guard ? 1 : n);
}
// Get the last element of an array. Passing **n** will return the last N
// values in the array.
function last(array, n, guard) {
if (array == null || array.length < 1) return n == null || guard ? void 0 : [];
if (n == null || guard) return array[array.length - 1];
return rest(array, Math.max(0, array.length - n));
}
// Trim out all falsy values from an array.
function compact(array) {
return filter(array, Boolean);
}
// Flatten out an array, either recursively (by default), or up to `depth`.
// Passing `true` or `false` as `depth` means `1` or `Infinity`, respectively.
function flatten(array, depth) {
return flatten$1(array, depth, false);
}
// Take the difference between one array and a number of other arrays.
// Only the elements present in just the first array will remain.
var difference = restArguments(function(array, rest) {
rest = flatten$1(rest, true, true);
return filter(array, function(value){
return !contains(rest, value);
});
});
// Return a version of the array that does not contain the specified value(s).
var without = restArguments(function(array, otherArrays) {
return difference(array, otherArrays);
});
// Produce a duplicate-free version of the array. If the array has already
// been sorted, you have the option of using a faster algorithm.
// The faster algorithm will not work with an iteratee if the iteratee
// is not a one-to-one function, so providing an iteratee will disable
// the faster algorithm.
function uniq(array, isSorted, iteratee, context) {
if (!isBoolean(isSorted)) {
context = iteratee;
iteratee = isSorted;
isSorted = false;
}
if (iteratee != null) iteratee = cb(iteratee, context);
var result = [];
var seen = [];
for (var i = 0, length = getLength(array); i < length; i++) {
var value = array[i],
computed = iteratee ? iteratee(value, i, array) : value;
if (isSorted && !iteratee) {
if (!i || seen !== computed) result.push(value);
seen = computed;
} else if (iteratee) {
if (!contains(seen, computed)) {
seen.push(computed);
result.push(value);
}
} else if (!contains(result, value)) {
result.push(value);
}
}
return result;
}
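// Usage sketch (illustrative):
//   _.uniq([1, 2, 1, 4, 1, 3]);    // => [1, 2, 4, 3]
//   _.uniq([1, 1, 2, 2, 3], true); // => [1, 2, 3], using the faster sorted path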
// Produce an array that contains the union: each distinct element from all of
// the passed-in arrays.
var union = restArguments(function(arrays) {
return uniq(flatten$1(arrays, true, true));
});
// Produce an array that contains every item shared between all the
// passed-in arrays.
function intersection(array) {
var result = [];
var argsLength = arguments.length;
for (var i = 0, length = getLength(array); i < length; i++) {
var item = array[i];
if (contains(result, item)) continue;
var j;
for (j = 1; j < argsLength; j++) {
if (!contains(arguments[j], item)) break;
}
if (j === argsLength) result.push(item);
}
return result;
}
// Complement of zip. Unzip accepts an array of arrays and groups
// each array's elements on shared indices.
function unzip(array) {
var length = array && max(array, getLength).length || 0;
var result = Array(length);
for (var index = 0; index < length; index++) {
result[index] = pluck(array, index);
}
return result;
}
// Zip together multiple lists into a single array -- elements that share
// an index go together.
var zip = restArguments(unzip);
// Converts lists into objects. Pass either a single array of `[key, value]`
// pairs, or two parallel arrays of the same length -- one of keys, and one of
// the corresponding values. Passing by pairs is the reverse of `_.pairs`.
function object(list, values) {
var result = {};
for (var i = 0, length = getLength(list); i < length; i++) {
if (values) {
result[list[i]] = values[i];
} else {
result[list[i][0]] = list[i][1];
}
}
return result;
}
// Generate an integer Array containing an arithmetic progression. A port of
// the native Python `range()` function. See
// [the Python documentation](https://docs.python.org/library/functions.html#range).
function range(start, stop, step) {
if (stop == null) {
stop = start || 0;
start = 0;
}
if (!step) {
step = stop < start ? -1 : 1;
}
var length = Math.max(Math.ceil((stop - start) / step), 0);
var range = Array(length);
for (var idx = 0; idx < length; idx++, start += step) {
range[idx] = start;
}
return range;
}
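// Usage sketch (illustrative):
//   _.range(5);          // => [0, 1, 2, 3, 4]
//   _.range(1, 11);      // => [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
//   _.range(0, 30, 5);   // => [0, 5, 10, 15, 20, 25]
//   _.range(0, -10, -1); // => [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]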
// Chunk a single array into multiple arrays, each containing `count` or fewer
// items.
function chunk(array, count) {
if (count == null || count < 1) return [];
var result = [];
var i = 0, length = array.length;
while (i < length) {
result.push(slice.call(array, i, i += count));
}
return result;
}
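// Usage sketch (illustrative):
//   _.chunk([1, 2, 3, 4, 5], 2); // => [[1, 2], [3, 4], [5]]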
// Helper function to continue chaining intermediate results.
function chainResult(instance, obj) {
return instance._chain ? _$1(obj).chain() : obj;
}
// Add your own custom functions to the Underscore object.
function mixin(obj) {
each(functions(obj), function(name) {
var func = _$1[name] = obj[name];
_$1.prototype[name] = function() {
var args = [this._wrapped];
push.apply(args, arguments);
return chainResult(this, func.apply(_$1, args));
};
});
return _$1;
}
// Add all mutator `Array` functions to the wrapper.
each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
var method = ArrayProto[name];
_$1.prototype[name] = function() {
var obj = this._wrapped;
if (obj != null) {
method.apply(obj, arguments);
if ((name === 'shift' || name === 'splice') && obj.length === 0) {
delete obj[0];
}
}
return chainResult(this, obj);
};
});
// Add all accessor `Array` functions to the wrapper.
each(['concat', 'join', 'slice'], function(name) {
var method = ArrayProto[name];
_$1.prototype[name] = function() {
var obj = this._wrapped;
if (obj != null) obj = method.apply(obj, arguments);
return chainResult(this, obj);
};
});
// Named Exports
var allExports = {
__proto__: null,
VERSION: VERSION,
restArguments: restArguments,
isObject: isObject,
isNull: isNull,
isUndefined: isUndefined,
isBoolean: isBoolean,
isElement: isElement,
isString: isString,
isNumber: isNumber,
isDate: isDate,
isRegExp: isRegExp,
isError: isError,
isSymbol: isSymbol,
isArrayBuffer: isArrayBuffer,
isDataView: isDataView$1,
isArray: isArray,
isFunction: isFunction$1,
isArguments: isArguments$1,
isFinite: isFinite$1,
isNaN: isNaN$1,
isTypedArray: isTypedArray$1,
isEmpty: isEmpty,
isMatch: isMatch,
isEqual: isEqual,
isMap: isMap,
isWeakMap: isWeakMap,
isSet: isSet,
isWeakSet: isWeakSet,
keys: keys,
allKeys: allKeys,
values: values,
pairs: pairs,
invert: invert,
functions: functions,
methods: functions,
extend: extend,
extendOwn: extendOwn,
assign: extendOwn,
defaults: defaults,
create: create,
clone: clone,
tap: tap,
get: get,
has: has,
mapObject: mapObject,
identity: identity,
constant: constant,
noop: noop,
toPath: toPath$1,
property: property,
propertyOf: propertyOf,
matcher: matcher,
matches: matcher,
times: times,
random: random,
now: now,
escape: _escape,
unescape: _unescape,
templateSettings: templateSettings,
template: template,
result: result,
uniqueId: uniqueId,
chain: chain,
iteratee: iteratee,
partial: partial,
bind: bind,
bindAll: bindAll,
memoize: memoize,
delay: delay,
defer: defer,
throttle: throttle,
debounce: debounce,
wrap: wrap,
negate: negate,
compose: compose,
after: after,
before: before,
once: once,
findKey: findKey,
findIndex: findIndex,
findLastIndex: findLastIndex,
sortedIndex: sortedIndex,
indexOf: indexOf,
lastIndexOf: lastIndexOf,
find: find,
detect: find,
findWhere: findWhere,
each: each,
forEach: each,
map: map,
collect: map,
reduce: reduce,
foldl: reduce,
inject: reduce,
reduceRight: reduceRight,
foldr: reduceRight,
filter: filter,
select: filter,
reject: reject,
every: every,
all: every,
some: some,
any: some,
contains: contains,
includes: contains,
include: contains,
invoke: invoke,
pluck: pluck,
where: where,
max: max,
min: min,
shuffle: shuffle,
sample: sample,
sortBy: sortBy,
groupBy: groupBy,
indexBy: indexBy,
countBy: countBy,
partition: partition,
toArray: toArray,
size: size,
pick: pick,
omit: omit,
first: first,
head: first,
take: first,
initial: initial,
last: last,
rest: rest,
tail: rest,
drop: rest,
compact: compact,
flatten: flatten,
without: without,
uniq: uniq,
unique: uniq,
union: union,
intersection: intersection,
difference: difference,
unzip: unzip,
transpose: unzip,
zip: zip,
object: object,
range: range,
chunk: chunk,
mixin: mixin,
'default': _$1
};
// Default Export
// Add all of the Underscore functions to the wrapper object.
var _ = mixin(allExports);
// Legacy Node.js API.
_._ = _;
return _;
})));
//# sourceMappingURL=underscore-umd.js.map
// End of file: AIRPrediction-0.0.8/docs/build/html/_static/underscore-1.13.1.js
/**
 * select a different prefix for underscore
*/
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
*/
jQuery.urldecode = function(x) {
if (!x) {
return x
}
return decodeURIComponent(x.replace(/\+/g, ' '));
};
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
if (typeof s === 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
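/**
 * Usage sketch (illustrative):
 *   jQuery.getQueryParameters('?q=term&lang=en&lang=de')
 *   // => {q: ['term'], lang: ['en', 'de']} -- values are always arrays of strings
 */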
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node, addItems) {
if (node.nodeType === 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 &&
!jQuery(node.parentNode).hasClass(className) &&
!jQuery(node.parentNode).hasClass("nohighlight")) {
var span;
var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.className = className;
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
var bbox = node.parentElement.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute('class', className);
addItems.push({
"parent": node.parentNode,
"target": rect});
}
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this, addItems);
});
}
}
var addItems = [];
var result = this.each(function() {
highlight(this, addItems);
});
for (var i = 0; i < addItems.length; ++i) {
jQuery(addItems[i].parent).before(addItems[i].target);
}
return result;
};
/*
* backward compatibility for jQuery.browser
 * This will be supported until the Firefox bug is fixed.
*/
if (!jQuery.browser) {
jQuery.uaMatch = function(ua) {
ua = ua.toLowerCase();
var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
/(msie) ([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
return {
browser: match[ 1 ] || "",
version: match[ 2 ] || "0"
};
};
jQuery.browser = {};
jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
}
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initIndexTable();
if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
this.initOnKeyListeners();
}
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
LOCALE : 'unknown',
  // gettext and ngettext don't access this so that the functions
  // can safely be bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated === 'undefined')
return string;
return (typeof translated === 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated === 'undefined')
return (n == 1) ? singular : plural;
return translated[Documentation.PLURALEXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* workaround a firefox stupidity
* see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash && $.browser.mozilla)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
if (!body.length) {
body = $('body');
}
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlighted');
});
}, 10);
$('<p class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
.appendTo($('#searchbox'));
}
},
/**
* init the domain index toggle buttons
*/
initIndexTable : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle();
if (src.substr(-9) === 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('#searchbox .highlight-link').fadeOut(300);
$('span.highlighted').removeClass('highlighted');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this === '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
},
initOnKeyListeners: function() {
$(document).keydown(function(event) {
var activeElementType = document.activeElement.tagName;
// don't navigate when in search box, textarea, dropdown or button
if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT'
&& activeElementType !== 'BUTTON' && !event.altKey && !event.ctrlKey && !event.metaKey
&& !event.shiftKey) {
switch (event.keyCode) {
case 37: // left
var prevHref = $('link[rel="prev"]').prop('href');
if (prevHref) {
window.location.href = prevHref;
return false;
}
break;
case 39: // right
var nextHref = $('link[rel="next"]').prop('href');
if (nextHref) {
window.location.href = nextHref;
return false;
}
break;
}
}
});
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
});
// End of file: AIRPrediction-0.0.8/docs/build/html/_static/doctools.js
var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
/* Non-minified version is copied as a separate JS file, is available */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}
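// Usage sketch (illustrative):
//   var stemmer = new Stemmer();
//   stemmer.stemWord('running');    // => 'run'
//   stemmer.stemWord('relational'); // => 'relat'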
var splitChars = (function() {
var result = {};
var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
var i, j, start, end;
for (i = 0; i < singles.length; i++) {
result[singles[i]] = true;
}
var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
[722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
[1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
[1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
[1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
[2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
[2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
[2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
[2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
[2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
[2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
[2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
[3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
[3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
[3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
[3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
[3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
[3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
[4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
[4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
[4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
[4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
[5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
[6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
[6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
[6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
[6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
[7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
[7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
[8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
[8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
[8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
[10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
[11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
[12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
[12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
[12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
[19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
[42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
[42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
[43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
[43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
[43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
[43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
[44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
[57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
[64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
[65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
[65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
for (i = 0; i < ranges.length; i++) {
start = ranges[i][0];
end = ranges[i][1];
for (j = start; j <= end; j++) {
result[j] = true;
}
}
return result;
})();
function splitQuery(query) {
var result = [];
var start = -1;
for (var i = 0; i < query.length; i++) {
if (splitChars[query.charCodeAt(i)]) {
if (start !== -1) {
result.push(query.slice(start, i));
start = -1;
}
} else if (start === -1) {
start = i;
}
}
if (start !== -1) {
result.push(query.slice(start));
}
return result;
}
// End of file: AIRPrediction-0.0.8/docs/build/html/_static/language_data.js
!function(n,r){"object"==typeof exports&&"undefined"!=typeof module?module.exports=r():"function"==typeof define&&define.amd?define("underscore",r):(n="undefined"!=typeof globalThis?globalThis:n||self,function(){var t=n._,e=n._=r();e.noConflict=function(){return n._=t,e}}())}(this,(function(){
// Underscore.js 1.13.1
// https://underscorejs.org
// (c) 2009-2021 Jeremy Ashkenas, Julian Gonggrijp, and DocumentCloud and Investigative Reporters & Editors
// Underscore may be freely distributed under the MIT license.
var n="1.13.1",r="object"==typeof self&&self.self===self&&self||"object"==typeof global&&global.global===global&&global||Function("return this")()||{},t=Array.prototype,e=Object.prototype,u="undefined"!=typeof Symbol?Symbol.prototype:null,o=t.push,i=t.slice,a=e.toString,f=e.hasOwnProperty,c="undefined"!=typeof ArrayBuffer,l="undefined"!=typeof DataView,s=Array.isArray,p=Object.keys,v=Object.create,h=c&&ArrayBuffer.isView,y=isNaN,d=isFinite,g=!{toString:null}.propertyIsEnumerable("toString"),b=["valueOf","isPrototypeOf","toString","propertyIsEnumerable","hasOwnProperty","toLocaleString"],m=Math.pow(2,53)-1;function j(n,r){return r=null==r?n.length-1:+r,function(){for(var t=Math.max(arguments.length-r,0),e=Array(t),u=0;u<t;u++)e[u]=arguments[u+r];switch(r){case 0:return n.call(this,e);case 1:return n.call(this,arguments[0],e);case 2:return n.call(this,arguments[0],arguments[1],e)}var o=Array(r+1);for(u=0;u<r;u++)o[u]=arguments[u];return o[r]=e,n.apply(this,o)}}function _(n){var r=typeof n;return"function"===r||"object"===r&&!!n}function w(n){return void 0===n}function A(n){return!0===n||!1===n||"[object Boolean]"===a.call(n)}function x(n){var r="[object "+n+"]";return function(n){return a.call(n)===r}}var S=x("String"),O=x("Number"),M=x("Date"),E=x("RegExp"),B=x("Error"),N=x("Symbol"),I=x("ArrayBuffer"),T=x("Function"),k=r.document&&r.document.childNodes;"function"!=typeof/./&&"object"!=typeof Int8Array&&"function"!=typeof k&&(T=function(n){return"function"==typeof n||!1});var D=T,R=x("Object"),F=l&&R(new DataView(new ArrayBuffer(8))),V="undefined"!=typeof Map&&R(new Map),P=x("DataView");var q=F?function(n){return null!=n&&D(n.getInt8)&&I(n.buffer)}:P,U=s||x("Array");function W(n,r){return null!=n&&f.call(n,r)}var z=x("Arguments");!function(){z(arguments)||(z=function(n){return W(n,"callee")})}();var L=z;function $(n){return O(n)&&y(n)}function C(n){return function(){return n}}function K(n){return function(r){var t=n(r);return"number"==typeof t&&t>=0&&t<=m}}function J(n){return function(r){return null==r?void 0:r[n]}}var G=J("byteLength"),H=K(G),Q=/\[object ((I|Ui)nt(8|16|32)|Float(32|64)|Uint8Clamped|Big(I|Ui)nt64)Array\]/;var X=c?function(n){return h?h(n)&&!q(n):H(n)&&Q.test(a.call(n))}:C(!1),Y=J("length");function Z(n,r){r=function(n){for(var r={},t=n.length,e=0;e<t;++e)r[n[e]]=!0;return{contains:function(n){return r[n]},push:function(t){return r[t]=!0,n.push(t)}}}(r);var t=b.length,u=n.constructor,o=D(u)&&u.prototype||e,i="constructor";for(W(n,i)&&!r.contains(i)&&r.push(i);t--;)(i=b[t])in n&&n[i]!==o[i]&&!r.contains(i)&&r.push(i)}function nn(n){if(!_(n))return[];if(p)return p(n);var r=[];for(var t in n)W(n,t)&&r.push(t);return g&&Z(n,r),r}function rn(n,r){var t=nn(r),e=t.length;if(null==n)return!e;for(var u=Object(n),o=0;o<e;o++){var i=t[o];if(r[i]!==u[i]||!(i in u))return!1}return!0}function tn(n){return n instanceof tn?n:this instanceof tn?void(this._wrapped=n):new tn(n)}function en(n){return new Uint8Array(n.buffer||n,n.byteOffset||0,G(n))}tn.VERSION=n,tn.prototype.value=function(){return this._wrapped},tn.prototype.valueOf=tn.prototype.toJSON=tn.prototype.value,tn.prototype.toString=function(){return String(this._wrapped)};var un="[object DataView]";function on(n,r,t,e){if(n===r)return 0!==n||1/n==1/r;if(null==n||null==r)return!1;if(n!=n)return r!=r;var o=typeof n;return("function"===o||"object"===o||"object"==typeof r)&&function n(r,t,e,o){r instanceof tn&&(r=r._wrapped);t instanceof tn&&(t=t._wrapped);var i=a.call(r);if(i!==a.call(t))return!1;if(F&&"[object 
Object]"==i&&q(r)){if(!q(t))return!1;i=un}switch(i){case"[object RegExp]":case"[object String]":return""+r==""+t;case"[object Number]":return+r!=+r?+t!=+t:0==+r?1/+r==1/t:+r==+t;case"[object Date]":case"[object Boolean]":return+r==+t;case"[object Symbol]":return u.valueOf.call(r)===u.valueOf.call(t);case"[object ArrayBuffer]":case un:return n(en(r),en(t),e,o)}var f="[object Array]"===i;if(!f&&X(r)){if(G(r)!==G(t))return!1;if(r.buffer===t.buffer&&r.byteOffset===t.byteOffset)return!0;f=!0}if(!f){if("object"!=typeof r||"object"!=typeof t)return!1;var c=r.constructor,l=t.constructor;if(c!==l&&!(D(c)&&c instanceof c&&D(l)&&l instanceof l)&&"constructor"in r&&"constructor"in t)return!1}o=o||[];var s=(e=e||[]).length;for(;s--;)if(e[s]===r)return o[s]===t;if(e.push(r),o.push(t),f){if((s=r.length)!==t.length)return!1;for(;s--;)if(!on(r[s],t[s],e,o))return!1}else{var p,v=nn(r);if(s=v.length,nn(t).length!==s)return!1;for(;s--;)if(p=v[s],!W(t,p)||!on(r[p],t[p],e,o))return!1}return e.pop(),o.pop(),!0}(n,r,t,e)}function an(n){if(!_(n))return[];var r=[];for(var t in n)r.push(t);return g&&Z(n,r),r}function fn(n){var r=Y(n);return function(t){if(null==t)return!1;var e=an(t);if(Y(e))return!1;for(var u=0;u<r;u++)if(!D(t[n[u]]))return!1;return n!==hn||!D(t[cn])}}var cn="forEach",ln="has",sn=["clear","delete"],pn=["get",ln,"set"],vn=sn.concat(cn,pn),hn=sn.concat(pn),yn=["add"].concat(sn,cn,ln),dn=V?fn(vn):x("Map"),gn=V?fn(hn):x("WeakMap"),bn=V?fn(yn):x("Set"),mn=x("WeakSet");function jn(n){for(var r=nn(n),t=r.length,e=Array(t),u=0;u<t;u++)e[u]=n[r[u]];return e}function _n(n){for(var r={},t=nn(n),e=0,u=t.length;e<u;e++)r[n[t[e]]]=t[e];return r}function wn(n){var r=[];for(var t in n)D(n[t])&&r.push(t);return r.sort()}function An(n,r){return function(t){var e=arguments.length;if(r&&(t=Object(t)),e<2||null==t)return t;for(var u=1;u<e;u++)for(var o=arguments[u],i=n(o),a=i.length,f=0;f<a;f++){var c=i[f];r&&void 0!==t[c]||(t[c]=o[c])}return t}}var xn=An(an),Sn=An(nn),On=An(an,!0);function Mn(n){if(!_(n))return{};if(v)return v(n);var r=function(){};r.prototype=n;var t=new r;return r.prototype=null,t}function En(n){return _(n)?U(n)?n.slice():xn({},n):n}function Bn(n){return U(n)?n:[n]}function Nn(n){return tn.toPath(n)}function In(n,r){for(var t=r.length,e=0;e<t;e++){if(null==n)return;n=n[r[e]]}return t?n:void 0}function Tn(n,r,t){var e=In(n,Nn(r));return w(e)?t:e}function kn(n){return n}function Dn(n){return n=Sn({},n),function(r){return rn(r,n)}}function Rn(n){return n=Nn(n),function(r){return In(r,n)}}function Fn(n,r,t){if(void 0===r)return n;switch(null==t?3:t){case 1:return function(t){return n.call(r,t)};case 3:return function(t,e,u){return n.call(r,t,e,u)};case 4:return function(t,e,u,o){return n.call(r,t,e,u,o)}}return function(){return n.apply(r,arguments)}}function Vn(n,r,t){return null==n?kn:D(n)?Fn(n,r,t):_(n)&&!U(n)?Dn(n):Rn(n)}function Pn(n,r){return Vn(n,r,1/0)}function qn(n,r,t){return tn.iteratee!==Pn?tn.iteratee(n,r):Vn(n,r,t)}function Un(){}function Wn(n,r){return null==r&&(r=n,n=0),n+Math.floor(Math.random()*(r-n+1))}tn.toPath=Bn,tn.iteratee=Pn;var zn=Date.now||function(){return(new Date).getTime()};function Ln(n){var r=function(r){return n[r]},t="(?:"+nn(n).join("|")+")",e=RegExp(t),u=RegExp(t,"g");return function(n){return n=null==n?"":""+n,e.test(n)?n.replace(u,r):n}}var 
$n={"&":"&","<":"<",">":">",'"':""","'":"'","`":"`"},Cn=Ln($n),Kn=Ln(_n($n)),Jn=tn.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g},Gn=/(.)^/,Hn={"'":"'","\\":"\\","\r":"r","\n":"n","\u2028":"u2028","\u2029":"u2029"},Qn=/\\|'|\r|\n|\u2028|\u2029/g;function Xn(n){return"\\"+Hn[n]}var Yn=/^\s*(\w|\$)+\s*$/;var Zn=0;function nr(n,r,t,e,u){if(!(e instanceof r))return n.apply(t,u);var o=Mn(n.prototype),i=n.apply(o,u);return _(i)?i:o}var rr=j((function(n,r){var t=rr.placeholder,e=function(){for(var u=0,o=r.length,i=Array(o),a=0;a<o;a++)i[a]=r[a]===t?arguments[u++]:r[a];for(;u<arguments.length;)i.push(arguments[u++]);return nr(n,e,this,this,i)};return e}));rr.placeholder=tn;var tr=j((function(n,r,t){if(!D(n))throw new TypeError("Bind must be called on a function");var e=j((function(u){return nr(n,e,r,this,t.concat(u))}));return e})),er=K(Y);function ur(n,r,t,e){if(e=e||[],r||0===r){if(r<=0)return e.concat(n)}else r=1/0;for(var u=e.length,o=0,i=Y(n);o<i;o++){var a=n[o];if(er(a)&&(U(a)||L(a)))if(r>1)ur(a,r-1,t,e),u=e.length;else for(var f=0,c=a.length;f<c;)e[u++]=a[f++];else t||(e[u++]=a)}return e}var or=j((function(n,r){var t=(r=ur(r,!1,!1)).length;if(t<1)throw new Error("bindAll must be passed function names");for(;t--;){var e=r[t];n[e]=tr(n[e],n)}return n}));var ir=j((function(n,r,t){return setTimeout((function(){return n.apply(null,t)}),r)})),ar=rr(ir,tn,1);function fr(n){return function(){return!n.apply(this,arguments)}}function cr(n,r){var t;return function(){return--n>0&&(t=r.apply(this,arguments)),n<=1&&(r=null),t}}var lr=rr(cr,2);function sr(n,r,t){r=qn(r,t);for(var e,u=nn(n),o=0,i=u.length;o<i;o++)if(r(n[e=u[o]],e,n))return e}function pr(n){return function(r,t,e){t=qn(t,e);for(var u=Y(r),o=n>0?0:u-1;o>=0&&o<u;o+=n)if(t(r[o],o,r))return o;return-1}}var vr=pr(1),hr=pr(-1);function yr(n,r,t,e){for(var u=(t=qn(t,e,1))(r),o=0,i=Y(n);o<i;){var a=Math.floor((o+i)/2);t(n[a])<u?o=a+1:i=a}return o}function dr(n,r,t){return function(e,u,o){var a=0,f=Y(e);if("number"==typeof o)n>0?a=o>=0?o:Math.max(o+f,a):f=o>=0?Math.min(o+1,f):o+f+1;else if(t&&o&&f)return e[o=t(e,u)]===u?o:-1;if(u!=u)return(o=r(i.call(e,a,f),$))>=0?o+a:-1;for(o=n>0?a:f-1;o>=0&&o<f;o+=n)if(e[o]===u)return o;return-1}}var gr=dr(1,vr,yr),br=dr(-1,hr);function mr(n,r,t){var e=(er(n)?vr:sr)(n,r,t);if(void 0!==e&&-1!==e)return n[e]}function jr(n,r,t){var e,u;if(r=Fn(r,t),er(n))for(e=0,u=n.length;e<u;e++)r(n[e],e,n);else{var o=nn(n);for(e=0,u=o.length;e<u;e++)r(n[o[e]],o[e],n)}return n}function _r(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=Array(u),i=0;i<u;i++){var a=e?e[i]:i;o[i]=r(n[a],a,n)}return o}function wr(n){var r=function(r,t,e,u){var o=!er(r)&&nn(r),i=(o||r).length,a=n>0?0:i-1;for(u||(e=r[o?o[a]:a],a+=n);a>=0&&a<i;a+=n){var f=o?o[a]:a;e=t(e,r[f],f,r)}return e};return function(n,t,e,u){var o=arguments.length>=3;return r(n,Fn(t,u,4),e,o)}}var Ar=wr(1),xr=wr(-1);function Sr(n,r,t){var e=[];return r=qn(r,t),jr(n,(function(n,t,u){r(n,t,u)&&e.push(n)})),e}function Or(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=0;o<u;o++){var i=e?e[o]:o;if(!r(n[i],i,n))return!1}return!0}function Mr(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=0;o<u;o++){var i=e?e[o]:o;if(r(n[i],i,n))return!0}return!1}function Er(n,r,t,e){return er(n)||(n=jn(n)),("number"!=typeof t||e)&&(t=0),gr(n,r,t)>=0}var Br=j((function(n,r,t){var e,u;return D(r)?u=r:(r=Nn(r),e=r.slice(0,-1),r=r[r.length-1]),_r(n,(function(n){var 
o=u;if(!o){if(e&&e.length&&(n=In(n,e)),null==n)return;o=n[r]}return null==o?o:o.apply(n,t)}))}));function Nr(n,r){return _r(n,Rn(r))}function Ir(n,r,t){var e,u,o=-1/0,i=-1/0;if(null==r||"number"==typeof r&&"object"!=typeof n[0]&&null!=n)for(var a=0,f=(n=er(n)?n:jn(n)).length;a<f;a++)null!=(e=n[a])&&e>o&&(o=e);else r=qn(r,t),jr(n,(function(n,t,e){((u=r(n,t,e))>i||u===-1/0&&o===-1/0)&&(o=n,i=u)}));return o}function Tr(n,r,t){if(null==r||t)return er(n)||(n=jn(n)),n[Wn(n.length-1)];var e=er(n)?En(n):jn(n),u=Y(e);r=Math.max(Math.min(r,u),0);for(var o=u-1,i=0;i<r;i++){var a=Wn(i,o),f=e[i];e[i]=e[a],e[a]=f}return e.slice(0,r)}function kr(n,r){return function(t,e,u){var o=r?[[],[]]:{};return e=qn(e,u),jr(t,(function(r,u){var i=e(r,u,t);n(o,r,i)})),o}}var Dr=kr((function(n,r,t){W(n,t)?n[t].push(r):n[t]=[r]})),Rr=kr((function(n,r,t){n[t]=r})),Fr=kr((function(n,r,t){W(n,t)?n[t]++:n[t]=1})),Vr=kr((function(n,r,t){n[t?0:1].push(r)}),!0),Pr=/[^\ud800-\udfff]|[\ud800-\udbff][\udc00-\udfff]|[\ud800-\udfff]/g;function qr(n,r,t){return r in t}var Ur=j((function(n,r){var t={},e=r[0];if(null==n)return t;D(e)?(r.length>1&&(e=Fn(e,r[1])),r=an(n)):(e=qr,r=ur(r,!1,!1),n=Object(n));for(var u=0,o=r.length;u<o;u++){var i=r[u],a=n[i];e(a,i,n)&&(t[i]=a)}return t})),Wr=j((function(n,r){var t,e=r[0];return D(e)?(e=fr(e),r.length>1&&(t=r[1])):(r=_r(ur(r,!1,!1),String),e=function(n,t){return!Er(r,t)}),Ur(n,e,t)}));function zr(n,r,t){return i.call(n,0,Math.max(0,n.length-(null==r||t?1:r)))}function Lr(n,r,t){return null==n||n.length<1?null==r||t?void 0:[]:null==r||t?n[0]:zr(n,n.length-r)}function $r(n,r,t){return i.call(n,null==r||t?1:r)}var Cr=j((function(n,r){return r=ur(r,!0,!0),Sr(n,(function(n){return!Er(r,n)}))})),Kr=j((function(n,r){return Cr(n,r)}));function Jr(n,r,t,e){A(r)||(e=t,t=r,r=!1),null!=t&&(t=qn(t,e));for(var u=[],o=[],i=0,a=Y(n);i<a;i++){var f=n[i],c=t?t(f,i,n):f;r&&!t?(i&&o===c||u.push(f),o=c):t?Er(o,c)||(o.push(c),u.push(f)):Er(u,f)||u.push(f)}return u}var Gr=j((function(n){return Jr(ur(n,!0,!0))}));function Hr(n){for(var r=n&&Ir(n,Y).length||0,t=Array(r),e=0;e<r;e++)t[e]=Nr(n,e);return t}var Qr=j(Hr);function Xr(n,r){return n._chain?tn(r).chain():r}function Yr(n){return jr(wn(n),(function(r){var t=tn[r]=n[r];tn.prototype[r]=function(){var n=[this._wrapped];return o.apply(n,arguments),Xr(this,t.apply(tn,n))}})),tn}jr(["pop","push","reverse","shift","sort","splice","unshift"],(function(n){var r=t[n];tn.prototype[n]=function(){var t=this._wrapped;return null!=t&&(r.apply(t,arguments),"shift"!==n&&"splice"!==n||0!==t.length||delete t[0]),Xr(this,t)}})),jr(["concat","join","slice"],(function(n){var r=t[n];tn.prototype[n]=function(){var n=this._wrapped;return null!=n&&(n=r.apply(n,arguments)),Xr(this,n)}}));var Zr=Yr({__proto__:null,VERSION:n,restArguments:j,isObject:_,isNull:function(n){return null===n},isUndefined:w,isBoolean:A,isElement:function(n){return!(!n||1!==n.nodeType)},isString:S,isNumber:O,isDate:M,isRegExp:E,isError:B,isSymbol:N,isArrayBuffer:I,isDataView:q,isArray:U,isFunction:D,isArguments:L,isFinite:function(n){return!N(n)&&d(n)&&!isNaN(parseFloat(n))},isNaN:$,isTypedArray:X,isEmpty:function(n){if(null==n)return!0;var r=Y(n);return"number"==typeof r&&(U(n)||S(n)||L(n))?0===r:0===Y(nn(n))},isMatch:rn,isEqual:function(n,r){return on(n,r)},isMap:dn,isWeakMap:gn,isSet:bn,isWeakSet:mn,keys:nn,allKeys:an,values:jn,pairs:function(n){for(var r=nn(n),t=r.length,e=Array(t),u=0;u<t;u++)e[u]=[r[u],n[r[u]]];return 
e},invert:_n,functions:wn,methods:wn,extend:xn,extendOwn:Sn,assign:Sn,defaults:On,create:function(n,r){var t=Mn(n);return r&&Sn(t,r),t},clone:En,tap:function(n,r){return r(n),n},get:Tn,has:function(n,r){for(var t=(r=Nn(r)).length,e=0;e<t;e++){var u=r[e];if(!W(n,u))return!1;n=n[u]}return!!t},mapObject:function(n,r,t){r=qn(r,t);for(var e=nn(n),u=e.length,o={},i=0;i<u;i++){var a=e[i];o[a]=r(n[a],a,n)}return o},identity:kn,constant:C,noop:Un,toPath:Bn,property:Rn,propertyOf:function(n){return null==n?Un:function(r){return Tn(n,r)}},matcher:Dn,matches:Dn,times:function(n,r,t){var e=Array(Math.max(0,n));r=Fn(r,t,1);for(var u=0;u<n;u++)e[u]=r(u);return e},random:Wn,now:zn,escape:Cn,unescape:Kn,templateSettings:Jn,template:function(n,r,t){!r&&t&&(r=t),r=On({},r,tn.templateSettings);var e=RegExp([(r.escape||Gn).source,(r.interpolate||Gn).source,(r.evaluate||Gn).source].join("|")+"|$","g"),u=0,o="__p+='";n.replace(e,(function(r,t,e,i,a){return o+=n.slice(u,a).replace(Qn,Xn),u=a+r.length,t?o+="'+\n((__t=("+t+"))==null?'':_.escape(__t))+\n'":e?o+="'+\n((__t=("+e+"))==null?'':__t)+\n'":i&&(o+="';\n"+i+"\n__p+='"),r})),o+="';\n";var i,a=r.variable;if(a){if(!Yn.test(a))throw new Error("variable is not a bare identifier: "+a)}else o="with(obj||{}){\n"+o+"}\n",a="obj";o="var __t,__p='',__j=Array.prototype.join,"+"print=function(){__p+=__j.call(arguments,'');};\n"+o+"return __p;\n";try{i=new Function(a,"_",o)}catch(n){throw n.source=o,n}var f=function(n){return i.call(this,n,tn)};return f.source="function("+a+"){\n"+o+"}",f},result:function(n,r,t){var e=(r=Nn(r)).length;if(!e)return D(t)?t.call(n):t;for(var u=0;u<e;u++){var o=null==n?void 0:n[r[u]];void 0===o&&(o=t,u=e),n=D(o)?o.call(n):o}return n},uniqueId:function(n){var r=++Zn+"";return n?n+r:r},chain:function(n){var r=tn(n);return r._chain=!0,r},iteratee:Pn,partial:rr,bind:tr,bindAll:or,memoize:function(n,r){var t=function(e){var u=t.cache,o=""+(r?r.apply(this,arguments):e);return W(u,o)||(u[o]=n.apply(this,arguments)),u[o]};return t.cache={},t},delay:ir,defer:ar,throttle:function(n,r,t){var e,u,o,i,a=0;t||(t={});var f=function(){a=!1===t.leading?0:zn(),e=null,i=n.apply(u,o),e||(u=o=null)},c=function(){var c=zn();a||!1!==t.leading||(a=c);var l=r-(c-a);return u=this,o=arguments,l<=0||l>r?(e&&(clearTimeout(e),e=null),a=c,i=n.apply(u,o),e||(u=o=null)):e||!1===t.trailing||(e=setTimeout(f,l)),i};return c.cancel=function(){clearTimeout(e),a=0,e=u=o=null},c},debounce:function(n,r,t){var e,u,o,i,a,f=function(){var c=zn()-u;r>c?e=setTimeout(f,r-c):(e=null,t||(i=n.apply(a,o)),e||(o=a=null))},c=j((function(c){return a=this,o=c,u=zn(),e||(e=setTimeout(f,r),t&&(i=n.apply(a,o))),i}));return c.cancel=function(){clearTimeout(e),e=o=a=null},c},wrap:function(n,r){return rr(r,n)},negate:fr,compose:function(){var n=arguments,r=n.length-1;return function(){for(var t=r,e=n[r].apply(this,arguments);t--;)e=n[t].call(this,e);return e}},after:function(n,r){return function(){if(--n<1)return r.apply(this,arguments)}},before:cr,once:lr,findKey:sr,findIndex:vr,findLastIndex:hr,sortedIndex:yr,indexOf:gr,lastIndexOf:br,find:mr,detect:mr,findWhere:function(n,r){return mr(n,Dn(r))},each:jr,forEach:jr,map:_r,collect:_r,reduce:Ar,foldl:Ar,inject:Ar,reduceRight:xr,foldr:xr,filter:Sr,select:Sr,reject:function(n,r,t){return Sr(n,fr(qn(r)),t)},every:Or,all:Or,some:Mr,any:Mr,contains:Er,includes:Er,include:Er,invoke:Br,pluck:Nr,where:function(n,r){return Sr(n,Dn(r))},max:Ir,min:function(n,r,t){var e,u,o=1/0,i=1/0;if(null==r||"number"==typeof r&&"object"!=typeof n[0]&&null!=n)for(var 
a=0,f=(n=er(n)?n:jn(n)).length;a<f;a++)null!=(e=n[a])&&e<o&&(o=e);else r=qn(r,t),jr(n,(function(n,t,e){((u=r(n,t,e))<i||u===1/0&&o===1/0)&&(o=n,i=u)}));return o},shuffle:function(n){return Tr(n,1/0)},sample:Tr,sortBy:function(n,r,t){var e=0;return r=qn(r,t),Nr(_r(n,(function(n,t,u){return{value:n,index:e++,criteria:r(n,t,u)}})).sort((function(n,r){var t=n.criteria,e=r.criteria;if(t!==e){if(t>e||void 0===t)return 1;if(t<e||void 0===e)return-1}return n.index-r.index})),"value")},groupBy:Dr,indexBy:Rr,countBy:Fr,partition:Vr,toArray:function(n){return n?U(n)?i.call(n):S(n)?n.match(Pr):er(n)?_r(n,kn):jn(n):[]},size:function(n){return null==n?0:er(n)?n.length:nn(n).length},pick:Ur,omit:Wr,first:Lr,head:Lr,take:Lr,initial:zr,last:function(n,r,t){return null==n||n.length<1?null==r||t?void 0:[]:null==r||t?n[n.length-1]:$r(n,Math.max(0,n.length-r))},rest:$r,tail:$r,drop:$r,compact:function(n){return Sr(n,Boolean)},flatten:function(n,r){return ur(n,r,!1)},without:Kr,uniq:Jr,unique:Jr,union:Gr,intersection:function(n){for(var r=[],t=arguments.length,e=0,u=Y(n);e<u;e++){var o=n[e];if(!Er(r,o)){var i;for(i=1;i<t&&Er(arguments[i],o);i++);i===t&&r.push(o)}}return r},difference:Cr,unzip:Hr,transpose:Hr,zip:Qr,object:function(n,r){for(var t={},e=0,u=Y(n);e<u;e++)r?t[n[e]]=r[e]:t[n[e][0]]=n[e][1];return t},range:function(n,r,t){null==r&&(r=n||0,n=0),t||(t=r<n?-1:1);for(var e=Math.max(Math.ceil((r-n)/t),0),u=Array(e),o=0;o<e;o++,n+=t)u[o]=n;return u},chunk:function(n,r){if(null==r||r<1)return[];for(var t=[],e=0,u=n.length;e<u;)t.push(i.call(n,e,e+=r));return t},mixin:Yr,default:tn});return Zr._=Zr,Zr})); | AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/docs/build/html/_static/underscore.js | underscore.js |
if (!Scorer) {
/**
* Simple result scoring code.
*/
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [filename, title, anchor, descr, score]
// and returns the new score.
/*
score: function(result) {
return result[4];
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5}, // used to be unimportantResults
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
partialTitle: 7,
// query found in terms
term: 5,
partialTerm: 2
};
}
if (!splitQuery) {
function splitQuery(query) {
return query.split(/\s+/);
}
}
/**
* Search Module
*/
var Search = {
_index : null,
_queued_query : null,
_pulse_status : -1,
htmlToText : function(htmlString) {
var virtualDocument = document.implementation.createHTMLDocument('virtual');
var htmlElement = $(htmlString, virtualDocument);
htmlElement.find('.headerlink').remove();
docContent = htmlElement.find('[role=main]')[0];
if(docContent === undefined) {
console.warn("Content block not found. Sphinx search tries to obtain it " +
"via '[role=main]'. Could you check your theme or template.");
return "";
}
return docContent.textContent || docContent.innerText;
},
init : function() {
var params = $.getQueryParameters();
if (params.q) {
var query = params.q[0];
$('input[name="q"]')[0].value = query;
this.performSearch(query);
}
},
loadIndex : function(url) {
$.ajax({type: "GET", url: url, data: null,
dataType: "script", cache: true,
complete: function(jqxhr, textstatus) {
if (textstatus != "success") {
document.getElementById("searchindexloader").src = url;
}
}});
},
setIndex : function(index) {
var q;
this._index = index;
if ((q = this._queued_query) !== null) {
this._queued_query = null;
Search.query(q);
}
},
hasIndex : function() {
return this._index !== null;
},
deferQuery : function(query) {
this._queued_query = query;
},
stopPulse : function() {
this._pulse_status = 0;
},
startPulse : function() {
if (this._pulse_status >= 0)
return;
function pulse() {
var i;
Search._pulse_status = (Search._pulse_status + 1) % 4;
var dotString = '';
for (i = 0; i < Search._pulse_status; i++)
dotString += '.';
Search.dots.text(dotString);
if (Search._pulse_status > -1)
window.setTimeout(pulse, 500);
}
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
performSearch : function(query) {
// create the required interface elements
this.out = $('#search-results');
this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
this.dots = $('<span></span>').appendTo(this.title);
this.status = $('<p class="search-summary"> </p>').appendTo(this.out);
this.output = $('<ul class="search"/>').appendTo(this.out);
$('#search-progress').text(_('Preparing search...'));
this.startPulse();
// index already loaded, the browser was quick!
if (this.hasIndex())
this.query(query);
else
this.deferQuery(query);
},
/**
* execute search (requires search index to be loaded)
*/
query : function(query) {
var i;
// stem the searchterms and add them to the correct list
var stemmer = new Stemmer();
var searchterms = [];
var excluded = [];
var hlterms = [];
var tmp = splitQuery(query);
var objectterms = [];
for (i = 0; i < tmp.length; i++) {
if (tmp[i] !== "") {
objectterms.push(tmp[i].toLowerCase());
}
if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i] === "") {
// skip this "word"
continue;
}
// stem the word
var word = stemmer.stemWord(tmp[i].toLowerCase());
// prevent stemmer from cutting word smaller than two chars
if(word.length < 3 && tmp[i].length >= 3) {
word = tmp[i];
}
var toAppend;
// select the correct list
if (word[0] == '-') {
toAppend = excluded;
word = word.substr(1);
}
else {
toAppend = searchterms;
hlterms.push(tmp[i].toLowerCase());
}
// only add if not already in the list
if (!$u.contains(toAppend, word))
toAppend.push(word);
}
var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
// console.debug('SEARCH: searching for:');
// console.info('required: ', searchterms);
// console.info('excluded: ', excluded);
// prepare search
var terms = this._index.terms;
var titleterms = this._index.titleterms;
// array of [filename, title, anchor, descr, score]
var results = [];
$('#search-progress').empty();
// lookup as object
for (i = 0; i < objectterms.length; i++) {
var others = [].concat(objectterms.slice(0, i),
objectterms.slice(i+1, objectterms.length));
results = results.concat(this.performObjectSearch(objectterms[i], others));
}
// lookup as search terms in fulltext
results = results.concat(this.performTermsSearch(searchterms, excluded, terms, titleterms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) {
for (i = 0; i < results.length; i++)
results[i][4] = Scorer.score(results[i]);
}
// now sort the results by score (in opposite order of appearance, since the
// display function below uses pop() to retrieve items) and then
// alphabetically
results.sort(function(a, b) {
var left = a[4];
var right = b[4];
if (left > right) {
return 1;
} else if (left < right) {
return -1;
} else {
// same score: sort alphabetically
left = a[1].toLowerCase();
right = b[1].toLowerCase();
return (left > right) ? -1 : ((left < right) ? 1 : 0);
}
});
// for debugging
//Search.lastresults = results.slice(); // a copy
//console.info('search results:', Search.lastresults);
// print the results
var resultCount = results.length;
function displayNextItem() {
// results left, load the summary and display it
if (results.length) {
var item = results.pop();
var listItem = $('<li></li>');
var requestUrl = "";
var linkUrl = "";
if (DOCUMENTATION_OPTIONS.BUILDER === 'dirhtml') {
// dirhtml builder
var dirname = item[0] + '/';
if (dirname.match(/\/index\/$/)) {
dirname = dirname.substring(0, dirname.length-6);
} else if (dirname == 'index/') {
dirname = '';
}
requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + dirname;
linkUrl = requestUrl;
} else {
// normal html builders
requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX;
linkUrl = item[0] + DOCUMENTATION_OPTIONS.LINK_SUFFIX;
}
listItem.append($('<a/>').attr('href',
linkUrl +
highlightstring + item[2]).html(item[1]));
if (item[3]) {
listItem.append($('<span> (' + item[3] + ')</span>'));
Search.output.append(listItem);
setTimeout(function() {
displayNextItem();
}, 5);
} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
$.ajax({url: requestUrl,
dataType: "text",
complete: function(jqxhr, textstatus) {
var data = jqxhr.responseText;
if (data !== '' && data !== undefined) {
listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
}
Search.output.append(listItem);
setTimeout(function() {
displayNextItem();
}, 5);
}});
} else {
// no source available, just display title
Search.output.append(listItem);
setTimeout(function() {
displayNextItem();
}, 5);
}
}
// search finished, update title and status message
else {
Search.stopPulse();
Search.title.text(_('Search Results'));
if (!resultCount)
Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
else
Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
Search.status.fadeIn(500);
}
}
displayNextItem();
},
/**
* search for object names
*/
performObjectSearch : function(object, otherterms) {
var filenames = this._index.filenames;
var docnames = this._index.docnames;
var objects = this._index.objects;
var objnames = this._index.objnames;
var titles = this._index.titles;
var i;
var results = [];
for (var prefix in objects) {
for (var name in objects[prefix]) {
var fullname = (prefix ? prefix + '.' : '') + name;
var fullnameLower = fullname.toLowerCase()
if (fullnameLower.indexOf(object) > -1) {
var score = 0;
var parts = fullnameLower.split('.');
// check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part)
if (fullnameLower == object || parts[parts.length - 1] == object) {
score += Scorer.objNameMatch;
// matches in last name
} else if (parts[parts.length - 1].indexOf(object) > -1) {
score += Scorer.objPartialMatch;
}
var match = objects[prefix][name];
var objname = objnames[match[1]][2];
var title = titles[match[0]];
// If more than one term searched for, we require other words to be
// found in the name/title/description
if (otherterms.length > 0) {
var haystack = (prefix + ' ' + name + ' ' +
objname + ' ' + title).toLowerCase();
var allfound = true;
for (i = 0; i < otherterms.length; i++) {
if (haystack.indexOf(otherterms[i]) == -1) {
allfound = false;
break;
}
}
if (!allfound) {
continue;
}
}
var descr = objname + _(', in ') + title;
var anchor = match[3];
if (anchor === '')
anchor = fullname;
else if (anchor == '-')
anchor = objnames[match[1]][1] + '-' + fullname;
// add custom score for some objects according to scorer
if (Scorer.objPrio.hasOwnProperty(match[2])) {
score += Scorer.objPrio[match[2]];
} else {
score += Scorer.objPrioDefault;
}
results.push([docnames[match[0]], fullname, '#'+anchor, descr, score, filenames[match[0]]]);
}
}
}
return results;
},
/**
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
*/
escapeRegExp : function(string) {
return string.replace(/[.*+\-?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
},
/**
* search for full-text terms in the index
*/
performTermsSearch : function(searchterms, excluded, terms, titleterms) {
var docnames = this._index.docnames;
var filenames = this._index.filenames;
var titles = this._index.titles;
var i, j, file;
var fileMap = {};
var scoreMap = {};
var results = [];
// perform the search on the required terms
for (i = 0; i < searchterms.length; i++) {
var word = searchterms[i];
var files = [];
var _o = [
{files: terms[word], score: Scorer.term},
{files: titleterms[word], score: Scorer.title}
];
// add support for partial matches
if (word.length > 2) {
var word_regex = this.escapeRegExp(word);
for (var w in terms) {
if (w.match(word_regex) && !terms[word]) {
_o.push({files: terms[w], score: Scorer.partialTerm})
}
}
for (var w in titleterms) {
if (w.match(word_regex) && !titleterms[word]) {
_o.push({files: titleterms[w], score: Scorer.partialTitle})
}
}
}
// no match but word was a required one
if ($u.every(_o, function(o){return o.files === undefined;})) {
break;
}
// found search word in contents
$u.each(_o, function(o) {
var _files = o.files;
if (_files === undefined)
return
if (_files.length === undefined)
_files = [_files];
files = files.concat(_files);
// set score for the word in each file to Scorer.term
for (j = 0; j < _files.length; j++) {
file = _files[j];
if (!(file in scoreMap))
scoreMap[file] = {};
scoreMap[file][word] = o.score;
}
});
// create the mapping
for (j = 0; j < files.length; j++) {
file = files[j];
if (file in fileMap && fileMap[file].indexOf(word) === -1)
fileMap[file].push(word);
else
fileMap[file] = [word];
}
}
// now check if the files don't contain excluded terms
for (file in fileMap) {
var valid = true;
// check if all requirements are matched
var filteredTermCount = // as search terms with length < 3 are discarded: ignore
searchterms.filter(function(term){return term.length > 2}).length
if (
fileMap[file].length != searchterms.length &&
fileMap[file].length != filteredTermCount
) continue;
// ensure that none of the excluded terms is in the search result
for (i = 0; i < excluded.length; i++) {
if (terms[excluded[i]] == file ||
titleterms[excluded[i]] == file ||
$u.contains(terms[excluded[i]] || [], file) ||
$u.contains(titleterms[excluded[i]] || [], file)) {
valid = false;
break;
}
}
// if we have still a valid result we can add it to the result list
if (valid) {
// select one (max) score for the file.
// for better ranking, we should calculate ranking by using words statistics like basic tf-idf...
var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]}));
results.push([docnames[file], titles[file], '', null, score, filenames[file]]);
}
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words, hlwords is the list of normal, unstemmed
* words. the first one is used to find the occurrence, the
* latter for highlighting it.
*/
makeSearchSummary : function(htmlText, keywords, hlwords) {
var text = Search.htmlToText(htmlText);
var textLower = text.toLowerCase();
var start = 0;
$.each(keywords, function() {
var i = textLower.indexOf(this.toLowerCase());
if (i > -1)
start = i;
});
start = Math.max(start - 120, 0);
var excerpt = ((start > 0) ? '...' : '') +
$.trim(text.substr(start, 240)) +
((start + 240 - text.length) ? '...' : '');
var rv = $('<p class="context"></p>').text(excerpt);
$.each(hlwords, function() {
rv = rv.highlightText(this, 'highlighted');
});
return rv;
}
};
$(document).ready(function() {
Search.init();
}); | AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/docs/build/html/_static/searchtools.js | searchtools.js |
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from datetime import datetime
"""
.. module:: ARIMA_model
:synopsis: The file containing the function that utilizes the ARIMA prediction model.
.. moduleauthor:: Haotian Wang <[email protected]>
"""
def arima_prediction(pollutant, state, county, city, date):
""" Opens and prepares the dataset (pollution_us_2000_2016.csv) to be used by the prophet model to predict
the specified pollutant given the location and date parameters. NOTE: Part of Time_Series_Models
Module Author: Haotian Wang <[email protected]>
:param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).
:param state: The location parameter indicating the state in the United States of America to predict for.
:param county: The location parameter indicating the county in the state to predict for.
:param city: The location parameter indicating the city in the county to predict for.
    :param date: The calendar date to predict for.
    :return: The value predicted by the ARIMA model as well as the units of the prediction (ppb or ppm).
"""
pollutant_choice = pollutant + " AQI"
# read the csv file into a dataframe
df = pd.read_csv('data/pollution_us_2000_2016.csv')
# delete unnecessary data columns
df = df.drop(columns=['Unnamed: 0', 'NO2 Units', 'O3 Units', 'SO2 Units', 'CO Units'])
# delete duplicate data tuples
df.drop_duplicates(inplace=True)
# convert Date local to python date and time
df['date'] = pd.to_datetime(df['Date Local'])
df = df.drop(columns=['Date Local'])
# compute mean AQI for each citiy for each date
mean_aqi = df.groupby(['State', 'County', 'City', 'date'])[['NO2 AQI', 'O3 AQI', 'SO2 AQI', 'CO AQI']].mean()
# reset index mean_aqi
mean_aqi = mean_aqi.reset_index()
# create subset of dataset to include only city and column selected for analysis
temp_df = mean_aqi[(mean_aqi.State == state) & (mean_aqi.County == county) & (mean_aqi.City == city)]
new_df = temp_df.loc[temp_df['City'] == city, ['date', pollutant_choice]]
# use ffill (forward fill) to handle missing value filling the missing value from the previous day
new_df = new_df.ffill()
new_df = new_df.set_index("date")
arima_model = ARIMA(new_df[pollutant_choice], order=(0, 1, 0))
model_fit = arima_model.fit()
date_format = "%Y-%m-%d"
new_df = new_df.reset_index()
start_date_temp = new_df.iloc[len(new_df.index) - 1]['date']
start_date = str(start_date_temp)[:10]
start_date = datetime.strptime(start_date, date_format)
target_date = datetime.strptime(date, date_format)
date_difference = target_date - start_date
mean_forecast = model_fit.forecast(steps=date_difference.days)
if pollutant == "SO2" or pollutant == "NO2":
pollutant_unit = "parts per billion (ppb)"
elif pollutant == "O3" or pollutant == "CO":
pollutant_unit = "parts per million (ppm)"
return mean_forecast[0][len(mean_forecast[0]) - 1], pollutant_unit | AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/Time_Series_Models/ARIMA_model.py | ARIMA_model.py |
import pandas as pd
from prophet import Prophet
from datetime import datetime
"""
.. module:: prophet_model
:synopsis: The file containing the function that utilizes the prophet prediction model.
.. moduleauthor:: Haotian Wang <[email protected]>
"""
def prophet_prediction(pollutant, state, county, city, date):
""" Opens and prepares the dataset (pollution_us_2000_2016.csv) to be used by the prophet model to predict
the specified pollutant given the location and date parameters. NOTE: Part of Time_Series_Models
Module Author: Haotian Wang <[email protected]>
:param pollutant: The specified pollutant to predict (NO2, O3, SO2, CO).
:param state: The location parameter indicating the state in the United States of America to predict for.
:param county: The location parameter indicating the county in the state to predict for.
:param city: The location parameter indicating the city in the county to predict for.
    :param date: The calendar date to predict for.
    :return: The value predicted by the prophet model as well as the units of the prediction (ppb or ppm).
"""
pollutant_choice = pollutant + " AQI"
# read the csv file into a dataframe
df = pd.read_csv('data/pollution_us_2000_2016.csv')
# delete unnecessary data columns
df = df.drop(columns=['Unnamed: 0', 'NO2 Units', 'O3 Units', 'SO2 Units', 'CO Units'])
# delete duplicate data tuples
df.drop_duplicates(inplace=True)
# convert Date local to python date and time
df['date'] = pd.to_datetime(df['Date Local'])
df = df.drop(columns=['Date Local'])
# compute mean AQI for each citiy for each date
mean_aqi = df.groupby(['State', 'County', 'City', 'date'])[['NO2 AQI', 'O3 AQI', 'SO2 AQI', 'CO AQI']].mean()
# reset index mean_aqi
mean_aqi = mean_aqi.reset_index()
# create subset of dataset to include only city and column selected for analysis
temp_df = mean_aqi[(mean_aqi.State == state) & (mean_aqi.County == county) & (mean_aqi.City == city)]
new_df = temp_df.loc[temp_df['City'] == city, ['date', pollutant_choice]]
date_format = "%Y-%m-%d"
start_date_temp = new_df.iloc[len(new_df.index) - 1]['date']
start_date = str(start_date_temp)[:10]
start_date = datetime.strptime(start_date, date_format)
target_date = datetime.strptime(date, date_format)
date_difference = target_date - start_date
new_df = new_df.rename(columns={"date": "ds",
pollutant_choice: "y"})
# use ffill (forward fill) to handle missing value filling the missing value from the previous day
new_df = new_df.ffill()
# model training
prophet_model = Prophet()
prophet_model.fit(new_df)
# the parameter 'periods' represents the number of days you want to predict after 2016-04-30
future = prophet_model.make_future_dataframe(periods=date_difference.days)
forecast = prophet_model.predict(future)
# print(forecast)
if pollutant == "SO2" or pollutant == "NO2":
pollutant_unit = "parts per billion (ppb)"
elif pollutant == "O3" or pollutant == "CO":
pollutant_unit = "parts per million (ppm)"
temp = forecast[forecast['ds'] == date]
output = list(x for x in temp["yhat"])
# print(output)
return output[0], pollutant_unit | AIRPrediction | /AIRPrediction-0.0.8.tar.gz/AIRPrediction-0.0.8/Time_Series_Models/prophet_model.py | prophet_model.py |
import base64
import json
import re
import uuid
import requests
from .pdf import PDF
from . import exceptions
url = "https://ais.swisscom.com/AIS-Server/rs/v1.0/sign"
class AIS(object):
"""Client object holding connection information to the AIS service."""
def __init__(self, customer, key_static, cert_file, cert_key):
"""Initialize an AIS client with authentication information."""
self.customer = customer
self.key_static = key_static
self.cert_file = cert_file
self.cert_key = cert_key
self.byte_range = None
self.last_request_id = None
def _request_id(self):
request_id = self.last_request_id = uuid.uuid4().hex
return request_id
def post(self, payload):
""" Do the post request for this payload and return the signature part
of the json response.
:type payload: str
:rtype: dict
"""
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json;charset=UTF-8',
}
cert = (self.cert_file, self.cert_key)
response = requests.post(url, data=payload, headers=headers,
cert=cert)
sign_resp = response.json()['SignResponse']
result = sign_resp['Result']
if 'Error' in result['ResultMajor']:
raise exceptions.error_for(response)
return sign_resp
def sign_batch(self, pdfs):
"""Sign a batch of files.
:type pdfs: list(PDF)
"""
# prepare pdfs in one batch
# payload in batch
PDF.prepare_batch(pdfs)
payload_documents = {
"DocumentHash" + str(count): {
"@ID": count,
"dsig.DigestMethod": {
"@Algorithm":
"http://www.w3.org/2001/04/xmlenc#sha256"
},
"dsig.DigestValue": pdf.digest()
}
for count, pdf in enumerate(pdfs)
}
payload = {
"SignRequest": {
"@RequestID": self._request_id(),
"@Profile": "http://ais.swisscom.ch/1.0",
"OptionalInputs": {
"ClaimedIdentity": {
"Name": ':'.join((self.customer, self.key_static)),
},
"SignatureType": "urn:ietf:rfc:3369",
"AdditionalProfile":
"http://ais.swisscom.ch/1.0/profiles/batchprocessing",
"AddTimestamp": {"@Type": "urn:ietf:rfc:3161"},
"sc.AddRevocationInformation": {"@Type": "BOTH"},
},
"InputDocuments": payload_documents
}
}
payload_json = json.dumps(payload, indent=4)
payload_json = re.sub(r'"DocumentHash\d+"', '"DocumentHash"',
payload_json)
sign_resp = self.post(payload_json)
other = sign_resp['SignatureObject']['Other']['sc.SignatureObjects']
for signature_object in other['sc.ExtendedSignatureObject']:
signature = Signature(base64.b64decode(
signature_object['Base64Signature']['$']
))
which_document = int(signature_object['@WhichDocument'])
pdf = pdfs[which_document]
pdf.write_signature(signature)
def sign_one_pdf(self, pdf):
"""Sign the given pdf file.
:type pdf: PDF
"""
pdf.prepare()
payload = {
"SignRequest": {
"@RequestID": self._request_id(),
"@Profile": "http://ais.swisscom.ch/1.0",
"OptionalInputs": {
"ClaimedIdentity": {
"Name": ':'.join((self.customer, self.key_static)),
},
"SignatureType": "urn:ietf:rfc:3369",
"AddTimestamp": {"@Type": "urn:ietf:rfc:3161"},
"sc.AddRevocationInformation": {"@Type": "BOTH"},
},
"InputDocuments": {
"DocumentHash": {
"dsig.DigestMethod": {
"@Algorithm":
"http://www.w3.org/2001/04/xmlenc#sha256"
},
"dsig.DigestValue": pdf.digest()
},
}
}
}
sign_response = self.post(json.dumps(payload))
signature = Signature(base64.b64decode(
sign_response['SignatureObject']['Base64Signature']['$']
))
pdf.write_signature(signature)
class Signature(object):
"""A cryptographic signature returned from the AIS webservice."""
def __init__(self, contents):
"""Build a Signature."""
self.contents = contents | AIS.py | /AIS.py-0.2.2.linux-x86_64.tar.gz/usr/local/lib/python3.5/dist-packages/AIS/ais.py | ais.py |
import base64
import codecs
import hashlib
import shutil
import subprocess
import tempfile
import PyPDF2
from pkg_resources import resource_filename
from . import exceptions
from . import helpers
class PDF(object):
"""A container for a PDF file to be signed and the signed version."""
def __init__(self, in_filename, prepared=False):
self.in_filename = in_filename
"""Filename of the PDF to be treated."""
_out_fp, _out_filename = tempfile.mkstemp(suffix=".pdf")
self.out_filename = _out_filename
"""Filename of the output, signed PDF."""
shutil.copy(self.in_filename, self.out_filename)
self.prepared = prepared
"""Is the PDF prepared with an empty signature?"""
@staticmethod
def _java_command():
java_dir = resource_filename(__name__, 'empty_signer')
return [
'java',
'-cp', '.:vendor/itextpdf-5.5.9.jar',
'-Duser.dir={}'.format(java_dir),
'EmptySigner',
]
@classmethod
def prepare_batch(cls, pdfs):
"""Add an empty signature to each of pdfs with only one java call."""
pdfs_to_prepare = filter(lambda p: not p.prepared, pdfs)
subprocess.check_call(
cls._java_command() +
[pdf.out_filename for pdf in pdfs_to_prepare]
)
for pdf in pdfs_to_prepare:
pdf.prepared = True
def prepare(self):
"""Add an empty signature to self.out_filename."""
if not self.prepared:
subprocess.check_call(
self._java_command() + [self.out_filename],
)
self.prepared = True
def digest(self):
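        # Locate the prepared (empty) signature object in the PDF, remember its
        # /ByteRange, and return the base64-encoded SHA-256 digest of the bytes
        # that the range covers.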
reader = PyPDF2.PdfFileReader(self.out_filename)
sig_obj = None
for generation, idnums in reader.xref.items():
for idnum in idnums:
if idnum == 0:
break
pdf_obj = PyPDF2.generic.IndirectObject(idnum, generation,
reader).getObject()
if (
isinstance(pdf_obj, PyPDF2.generic.DictionaryObject) and
pdf_obj.get('/Type') == '/Sig'
):
sig_obj = pdf_obj
break
if sig_obj is None:
raise exceptions.MissingPreparedSignature
self.byte_range = sig_obj['/ByteRange']
h = hashlib.sha256()
with open(self.out_filename, 'rb') as fp:
for start, length in (self.byte_range[:2], self.byte_range[2:]):
fp.seek(start)
h.update(fp.read(length))
result = base64.b64encode(h.digest())
if helpers.PY3:
result = result.decode('ascii')
return result
def write_signature(self, signature):
""" Write the signature in the pdf file
:type signature: Signature
"""
with open(self.out_filename, "rb+") as fp:
fp.seek(self.byte_range[1] + 1)
fp.write(codecs.encode(signature.contents, 'hex')) | AIS.py | /AIS.py-0.2.2.linux-x86_64.tar.gz/usr/local/lib/python3.5/dist-packages/AIS/pdf.py | pdf.py |
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License version 3
as published by the Free Software Foundation with the addition of the
following permission added to Section 15 as permitted in Section 7(a):
FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED BY
ITEXT GROUP. ITEXT GROUP DISCLAIMS THE WARRANTY OF NON INFRINGEMENT
OF THIRD PARTY RIGHTS
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, see http://www.gnu.org/licenses or write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA, 02110-1301 USA, or download the license from the following URL:
http://itextpdf.com/terms-of-use/
The interactive user interfaces in modified source and object code versions
of this program must display Appropriate Legal Notices, as required under
Section 5 of the GNU Affero General Public License.
In accordance with Section 7(b) of the GNU Affero General Public License,
a covered work must retain the producer line in every PDF that is created
or manipulated using iText.
You can be released from the requirements of the license by purchasing
a commercial license. Buying such a license is mandatory as soon as you
develop commercial activities involving the iText software without
disclosing the source code of your own applications.
These activities include: offering paid services to customers as an ASP,
serving PDFs on the fly in a web application, shipping iText with a closed
source product.
For more information, please contact iText Software Corp. at this
address: [email protected]
| AIS.py | /AIS.py-0.2.2.linux-x86_64.tar.gz/usr/local/lib/python3.5/dist-packages/AIS/empty_signer/vendor/LICENSE.md | LICENSE.md |
AIS2.py
=======
.. image:: https://img.shields.io/pypi/v/AIS2.py.svg
:target: https://pypi.org/project/AIS2.py
:alt: PyPI version
.. image:: https://img.shields.io/pypi/pyversions/AIS2.py.svg
:target: https://pypi.org/project/AIS2.py
:alt: Python versions
.. image:: https://github.com/seantis/AIS2.py/actions/workflows/python-tox.yaml/badge.svg
:target: https://github.com/seantis/AIS2.py/actions
:alt: Tests
.. image:: https://readthedocs.org/projects/ais2py/badge/?version=latest
:target: https://ais2py.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://codecov.io/gh/seantis/AIS2.py/branch/master/graph/badge.svg?token=NRPFO5L0PG
:target: https://codecov.io/gh/seantis/AIS2.py
:alt: Codecov.io
.. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
:target: https://github.com/pre-commit/pre-commit
:alt: pre-commit
AIS.py: a Python interface for the Swisscom All-in Signing Service (aka AIS).
AIS2.py is a fork created to get rid of the licensing woes that affected the itext dependency and to replace it with pyHanko. Furthermore, the API was slightly adjusted to be more flexible, so buffers can be passed around rather than files that need to exist on the filesystem.
AIS2.py works like this:
.. code-block:: python
>>> from AIS import AIS, PDF
>>> client = AIS('alice', 'a_secret', 'a.crt', 'a.key')
>>> pdf = PDF('source.pdf')
    >>> client.sign_one_pdf(pdf)
>>> with open('target.pdf', 'wb') as fp:
... fp.write(pdf.out_stream.getvalue())
...
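Batch signing follows the same pattern through ``sign_batch``; a minimal sketch (the file names here are placeholders):
.. code-block:: python
    >>> pdfs = [PDF('one.pdf'), PDF('two.pdf')]
    >>> client.sign_batch(pdfs)
    >>> for i, pdf in enumerate(pdfs):
    ...     with open('signed_%d.pdf' % i, 'wb') as fp:
    ...         fp.write(pdf.out_stream.getvalue())
    ...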
License
-------
Copyright (C) 2016 Camptocamp SA
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
| AIS2.py | /AIS2.py-2.2.1.tar.gz/AIS2.py-2.2.1/README.rst | README.rst |
import base64
import json
import uuid
import requests
from . import exceptions
from typing import Any
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .pdf import PDF
url = 'https://ais.swisscom.com/AIS-Server/rs/v1.0/sign'
class AIS:
"""Client object holding connection information to the AIS service."""
last_request_id: Optional[str]
"""Contains the id of the last request made to the AIS API."""
def __init__(
self,
customer: str,
key_static: str,
cert_file: str,
cert_key: str
):
"""Initialize an AIS client with authentication information."""
self.customer = customer
self.key_static = key_static
self.cert_file = cert_file
self.cert_key = cert_key
self.last_request_id = None
def _request_id(self) -> str:
self.last_request_id = uuid.uuid4().hex
return self.last_request_id
def post(self, payload: str) -> Dict[str, Any]:
""" Do the post request for this payload and return the signature part
of the json response.
"""
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json;charset=UTF-8',
}
cert = (self.cert_file, self.cert_key)
response = requests.post(url, data=payload, headers=headers,
cert=cert)
sign_resp = response.json()['SignResponse']
result = sign_resp['Result']
if 'Error' in result['ResultMajor']:
raise exceptions.error_for(response)
return sign_resp
def sign_batch(self, pdfs: Sequence['PDF']) -> None:
"""Sign a batch of files."""
# Let's just return if the batch is empty somehow
if not pdfs:
return
# Let's not be pedantic and allow a batch of size 1
if len(pdfs) == 1:
return self.sign_one_pdf(pdfs[0])
payload_documents = {
'DocumentHash': [
{
'@ID': index,
'dsig.DigestMethod': {
'@Algorithm': 'http://www.w3.org/2001/04/xmlenc#sha256'
},
'dsig.DigestValue': pdf.digest()
}
for index, pdf in enumerate(pdfs)
]
}
payload = {
'SignRequest': {
'@RequestID': self._request_id(),
'@Profile': 'http://ais.swisscom.ch/1.1',
'OptionalInputs': {
'AddTimestamp': {
'@Type': 'urn:ietf:rfc:3161'
},
'AdditionalProfile': [
'http://ais.swisscom.ch/1.0/profiles/batchprocessing'
],
'ClaimedIdentity': {
'Name': ':'.join((self.customer, self.key_static)),
},
'SignatureType': 'urn:ietf:rfc:3369',
'sc.AddRevocationInformation': {
'@Type': 'BOTH'
},
},
'InputDocuments': payload_documents
}
}
payload_json = json.dumps(payload, indent=4)
sign_resp = self.post(payload_json)
other = sign_resp['SignatureObject']['Other']['sc.SignatureObjects']
for signature_object in other['sc.ExtendedSignatureObject']:
signature = base64.b64decode(
signature_object['Base64Signature']['$']
)
which_document = int(signature_object['@WhichDocument'])
pdfs[which_document].write_signature(signature)
def sign_one_pdf(self, pdf: 'PDF') -> None:
"""Sign the given pdf file."""
payload = {
'SignRequest': {
'@RequestID': self._request_id(),
'@Profile': 'http://ais.swisscom.ch/1.1',
'OptionalInputs': {
'AddTimestamp': {
'@Type': 'urn:ietf:rfc:3161'
},
'AdditionalProfile': [],
'ClaimedIdentity': {
'Name': ':'.join((self.customer, self.key_static)),
},
'SignatureType': 'urn:ietf:rfc:3369',
'sc.AddRevocationInformation': {
'@Type': 'BOTH'
},
},
'InputDocuments': {
'DocumentHash': [{
'dsig.DigestMethod': {
'@Algorithm':
'http://www.w3.org/2001/04/xmlenc#sha256'
},
'dsig.DigestValue': pdf.digest()
}],
}
}
}
sign_response = self.post(json.dumps(payload))
signature = base64.b64decode(
sign_response['SignatureObject']['Base64Signature']['$']
)
pdf.write_signature(signature) | AIS2.py | /AIS2.py-2.2.1.tar.gz/AIS2.py-2.2.1/AIS/ais.py | ais.py |
import base64
from datetime import datetime
import io
from pyhanko.pdf_utils.incremental_writer import IncrementalPdfFileWriter
from pyhanko.sign import fields
from pyhanko.sign import signers
from pyhanko.sign.signers import cms_embedder
from .exceptions import SignatureTooLarge
from typing import overload
from typing import BinaryIO
from typing import Optional
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .types import FileLike
from .types import SupportsBinaryRead
def is_seekable(fp: 'SupportsBinaryRead') -> bool:
return getattr(fp, 'seekable', lambda: False)()
class PDF:
"""A container for a PDF file to be signed and the signed version."""
@overload
def __init__(
self,
input_file: 'FileLike',
*,
out_stream: Optional[BinaryIO] = ...,
sig_name: str = ...,
sig_size: int = ...
): ...
@overload
def __init__(
self,
*,
inout_stream: BinaryIO,
sig_name: str = ...,
sig_size: int = ...
): ...
def __init__(
self,
input_file: Optional['FileLike'] = None,
*,
inout_stream: Optional[BinaryIO] = None,
out_stream: Optional[BinaryIO] = None,
sig_name: str = 'Signature',
sig_size: int = 64*1024, # 64 KiB
):
"""Accepts either a filename or a file-like object.
It is the callers responsibility to ensure that buffers
passed as `input_file` are pointing to the start of the file.
We make no attempt to seek to the start of the file.
:param inout_stream: Optional stream that will directly
be used as both the input and output stream for in-place
signing.
:param out_stream: Optional stream that will be used to
store the signed PDF. By default a BytesIO stream will
be created to store the signed PDF.
:param sig_name: Name of the Signature field to use. If
no Signature with that name exists in the PDF, a new one
will be created.
:param sig_size: Size of the signature in DER encoding
in bytes. By default 64KiB will be reserved, which should
be enough for most cases right now.
"""
in_place = out_stream is None
writer_stream: 'SupportsBinaryRead'
if isinstance(inout_stream, io.BytesIO):
# in this case we create the signed version in-place
# so the out_stream will be assigned the in_stream
writer_stream = inout_stream
assert out_stream is None
elif input_file is None:
raise ValueError('Either input_file or in_stream needs to be set')
elif isinstance(input_file, str):
# in this case we just read the entire file into a buffer
# and create the signed version in-place
with open(input_file, 'rb') as fp:
writer_stream = io.BytesIO(fp.read())
elif is_seekable(input_file):
# in this case we can't assume that we're allowed to
# create the signed version in-place, but we can allow
# the IncrementalPdfFileWriter to operate on the input
# file directly.
writer_stream = input_file
out_stream = out_stream or io.BytesIO()
in_place = False
else:
# in this case we can't seek the input file so we need
# to read the entire file into a buffer as well
writer_stream = io.BytesIO(input_file.read())
writer = IncrementalPdfFileWriter(writer_stream)
self.cms_writer = cms_embedder.PdfCMSEmbedder().write_cms(
field_name=sig_name,
writer=writer
)
"""CMS Writer used for embedding the signature"""
next(self.cms_writer)
self.sig_size = sig_size
"""Number of bytes reserved for the signature.
It is the caller's responsibility to ensure that this is
large enough to store the entire signature.
        Currently 64KiB (the default) appears to be enough, but
the signature has been known to grow in size over the
years.
"""
if in_place:
assert out_stream is None
assert hasattr(writer_stream, 'write')
out_stream = writer_stream # type: ignore[assignment]
self.sig_io_setup = cms_embedder.SigIOSetup(
md_algorithm='sha256',
in_place=in_place,
output=out_stream
)
"""Signing I/O setup to be passed to pyHanko"""
@property
def out_stream(self) -> BinaryIO:
"""Output stream for the signed PDF."""
return self.sig_io_setup.output
def digest(self) -> str:
"""Computes the PDF digest."""
sig_obj = signers.SignatureObject(
timestamp=datetime.now(),
bytes_reserved=self.sig_size,
)
self.cms_writer.send(
cms_embedder.SigObjSetup(
sig_placeholder=sig_obj,
mdp_setup=cms_embedder.SigMDPSetup(
md_algorithm='sha256',
certify=True,
docmdp_perms=fields.MDPPerm.NO_CHANGES,
)
)
)
digest, out_stream = self.cms_writer.send(self.sig_io_setup)
assert out_stream is self.out_stream
result = base64.b64encode(digest.document_digest)
return result.decode('ascii')
def write_signature(self, signature: bytes) -> None:
""" Writes the signature into the pdf file.
`digest` needs to be called first.
:raises: :class:`SignatureTooLarge`: If sig_size is
too small to store the entire signature.
"""
signature_size = len(signature)*2 # account for hex encoding
if signature_size > self.sig_size:
raise SignatureTooLarge(signature_size)
self.cms_writer.send(signature) | AIS2.py | /AIS2.py-2.2.1.tar.gz/AIS2.py-2.2.1/AIS/pdf.py | pdf.py |
AISTLAB\_nitrotyper
===================
https://www.nitrotype.com/race auto typer using python3 and cv2
Only 1920x1080 resolution is currently supported.
winxos, AISTLAB 2017-03-17
INSTALL:
--------
pip3 install nitrotyper
USAGE:
------
1. open https://www.nitrotype.com/race using your web browser
2. open a console and run the **nitrotyper** command.
3. make sure the web browser is on the top layer of the desktop, and
   enjoy it.
4. the delay parameter controls the typing speed.
*Just for educational purposes, take care of yourself.*
| AISTLAB_nitrotyper | /AISTLAB_nitrotyper-0.6.10.tar.gz/AISTLAB_nitrotyper-0.6.10/README.rst | README.rst |
# AISTLAB_nitrotyper
> https://www.nitrotype.com/race auto typer using python3 and cv2
>
> Only 1920x1080 resolution is currently supported.
>
> winxos, AISTLAB 2017-03-17
## INSTALL:
pip3 install nitrotyper
## USAGE:
1. open https://www.nitrotype.com/race using your web browser
2. open a console and run the **nitrotyper** command.
3. make sure the web browser is on the top layer of the desktop, and enjoy it.
4. the delay parameter controls the typing speed (see the Python sketch below).
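If you prefer to drive it from Python instead of the console command, the `run` function accepts the delay directly. A small sketch (the import path assumes this package's `nitrotyper/nitrotyper.py` layout):
``` python
from nitrotyper.nitrotyper import run
# a larger delay (seconds slept between keystrokes) means slower typing
run(delay=0.05)
```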
*Just for educational purposes, take care of yourself.*
| AISTLAB_nitrotyper | /AISTLAB_nitrotyper-0.6.10.tar.gz/AISTLAB_nitrotyper-0.6.10/README.md | README.md |
import cv2
import pyautogui as pg
import numpy as np
import time
import json
def img_dhash_to_hex(src):
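    # Build a bit string over the image (one bit per pixel, skipping the last
    # row) that marks black pixels, then pack it into a hex fingerprint.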
h, w = src.shape[:2]
hash_str = ""
for r in range(h - 1):
for c in range(w):
hash_str += "%d" % (src[r][c] == 0)
return '%0*X' % ((len(hash_str) + 3) // 4, int(hash_str, 2))
def hash_diff_for_hex(h1, h2):
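    # Hamming distance between two hex fingerprints (number of differing bits).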
return bin(int(h1, 16) ^ int(h2, 16)).count("1")
def image_recognize(img):
mh = img_dhash_to_hex(img)
res = []
for i in char_data.keys():
if char_data[i] != "":
res.append((hash_diff_for_hex(mh, char_data[i]), i))
return sorted(res)
def get_roi_bounding_rect(img, color_min, color_max):
roi_mask = cv2.inRange(img.copy(), color_min, color_max)
roi_mask = cv2.erode(roi_mask, None, iterations=1)
# cv2.imshow("mask", roi_mask)
return cv2.boundingRect(roi_mask) # x,y,w,h
def sampler(im_char, imgs=[]):
img_h = img_dhash_to_hex(im_char)
if img_h not in imgs:
imgs.append(img_h)
cv2.imwrite("./data/%s.png" % str(img_h), im_char)
def run(delay=0.01, is_sampler=False):
NORMAL_CHAR_BOX_COLOR = np.array([160, 234, 172])
ERROR_CHAR_BOX_COLOR = np.array([160, 170, 234])
def get_current_line_rect():
def found_current_rect():
timer = 0
while True:
im = pg.screenshot()
im_raw = np.array(im)
im_raw = cv2.cvtColor(im_raw, cv2.COLOR_BGR2RGB)
# cv2.imwrite("a.png", im_raw)
roi_mask = cv2.inRange(im_raw.copy(), NORMAL_CHAR_BOX_COLOR, NORMAL_CHAR_BOX_COLOR)
# cv2.imwrite("b.png", roi_mask)
roi_mask = cv2.erode(roi_mask, None, iterations=1)
x, y, w, h = cv2.boundingRect(roi_mask) # x,y,w,h
if w * h > 0:
break
time.sleep(0.1)
timer += 1
print("[debug] try found bound. %d" % timer)
if timer > 60:
pg.press("f5")
print("[debug] time out, refresh")
time.sleep(3)
timer = 0
return x, y, w, h
bx, by, bw, bh = found_current_rect()
r = (bx - 5, by, bx + bw * 42, by + bh)
return r, bw, bh
r, bw, bh = get_current_line_rect()
enter_counter = 0
last_x = 0
miss = 0
while True:
if 300 > miss > 200: # nothing found state
print("[debug] enter.")
pg.press("enter")
time.sleep(1)
r, _, _ = get_current_line_rect()
elif miss > 500:
pg.press("f5")
time.sleep(1)
miss = 0
st = time.clock()
        sub = pg.screenshot(region=r)  # todo: screenshot capture is too slow; consider speeding it up or typing several characters per capture
# print("[debug]sreenshot %f" % (time.clock() - st))
cvs = np.array(sub)
cvs = cv2.cvtColor(cvs, cv2.COLOR_BGR2RGB)
# print("[debug]convert %f" % (time.clock() - st))
x, y, w, h = get_roi_bounding_rect(cvs, NORMAL_CHAR_BOX_COLOR, NORMAL_CHAR_BOX_COLOR)
if w * h != bw * bh: # err
x, y, w, h = get_roi_bounding_rect(cvs, ERROR_CHAR_BOX_COLOR, ERROR_CHAR_BOX_COLOR)
if w * h != bw * bh: # nothing found.
print("[debug] miss %d." % miss)
miss += 1
continue
else:
miss = 0
        if last_x == x:  # type mismatch error state
enter_counter += 1
print("[debug] retry times %d" % enter_counter)
if enter_counter == 6:
pg.press("enter")
print("[debug] press enter")
elif enter_counter == 10:
pg.press("enter")
print("[debug] press enter")
else:
enter_counter = 0
last_x = x
im_char = cvs[y:y + h, x:x + w, :]
im_char = cv2.cvtColor(im_char, cv2.COLOR_RGB2GRAY)
t, im_char = cv2.threshold(im_char, 0, 255, cv2.THRESH_OTSU)
# im_char=cv2.resize(im_char,(10,30))
# print("[debug]exact %f" % (time.clock() - st))
ch = image_recognize(im_char)
print("[debug] recognize %s time used:%f" % (str(ch[:3]), time.clock() - st))
if ch[0][0] < 20: # auto press
pg.press(ch[0][1])
if is_sampler:
sampler(im_char)
# cv2.imshow("raw", cvs)
time.sleep(delay)
key = cv2.waitKey(1)
if key == 27:
            break  # press Esc to exit the program
import pkgutil  # pkgutil.get_data is required to read data files bundled inside an egg package
try:
f = pkgutil.get_data("nitrotyper", 'data/chars.json').decode('utf-8') #
char_data = json.loads(f)
except IOError as e:
print("[error] %s" % e)
exit()
if __name__ == "__main__":
run(delay=0) | AISTLAB_nitrotyper | /AISTLAB_nitrotyper-0.6.10.tar.gz/AISTLAB_nitrotyper-0.6.10/nitrotyper/nitrotyper.py | nitrotyper.py |
from flask import Flask, request, render_template
import threading
import json
import urllib.request, urllib.error
import os, sys
import queue
from novel_grab.novel_grab import Downloader
app = Flask(__name__, static_url_path="") # omit static
with open(os.path.join(sys.path[0], 'novels.json'), 'r', encoding='utf-8') as gf: # relative path
novels = json.load(gf)
def index_novel(u):
for i, m in enumerate(novels):
if m["from"] == u:
return i
return -1
def add_item(n, f, d):
id = index_novel(f)
if id < 0:
novels.append({"id": len(novels) + 1, "name": n, "from": f, "state": 0, "download": d})
return True, len(novels) - 1
else:
return False, id
@app.route('/update')
def update():
for g in grab_list:
novels[g.info["id"]]['state'] = "%d" % g.get_info()["percent"]
if g.get_info()["percent"] == 100:
with open(os.path.join(sys.path[0], 'novels.json'), 'w', encoding='utf-8') as outfile:
json.dump(novels, outfile)
grab_list.remove(g)
return json.dumps(novels)
grab = Downloader()
grab_list = []
@app.route('/')
def index():
global grab
url = request.query_string.decode('utf8')
print(url)
if not str(url).startswith('http'):
url = "http://" + url
try:
urllib.request.urlopen(url, timeout=1000)
except urllib.error.URLError or urllib.error.HTTPError as e:
return render_template('index.html', url=url, sites=grab.get_info()["supported_sites"], urlerror=str(e.reason))
if not grab.set_url(url):
return render_template('index.html', url=url, sites=grab.get_info()["supported_sites"], urlerror="页面地址并非全部章节页面")
nid = index_novel(url)
if nid < 0: # first add
grab.start()
grab_list.append(grab)
_, grab.info["id"] = add_item(n=grab.get_info()["novel_name"], f=url, d=grab.get_info()["file_name"])
return render_template('index.html', sites=grab.get_info()["supported_sites"],
name=grab.get_info()["novel_name"], url=url, novels=novels)
else:
return render_template('index.html', alreadyid=nid + 1, sites=grab.get_info()["supported_sites"],
name=grab.get_info()["novel_name"], url=url, novels=novels)
return "invalid."
def main():
    """
    Prepend localhost:777/? to the URL of your novel's chapter index page to start a download.
    """
    os.chdir(os.path.join(sys.path[0], "static"))  # switch the working directory so finished downloads are served from static/
    app.run(host='0.0.0.0', debug=True, port=777)  # TODO: use a production WSGI server instead of the Flask debug server
if __name__ == '__main__':
    main()
AISTLAB novel grab
==================
novel grab crawler module using python3 and lxml
multiprocesssing with multithread version
winxos, AISTLAB Since 2017-02-19
INSTALL:
--------
``pip3 install aistlab_novel_grab``
1. USAGE:
---------
RUN COMMAND IN CONSOLE:
``novel_grab http://the_url_of_novel_chapters_page``
EXAMPLE:
``novel_grab http://book.zongheng.com/showchapter/654086.html``
**SUPPORTED SITES:**

* http://book.zongheng.com
* http://www.aoyuge.com
* http://www.quanshu.net
2. USAGE AS PYTHON MODULE:
--------------------------
.. code:: python
from novel_grab.novel_grab import Downloader
d = Downloader()
print(d.get_info())
if d.set_url('http://book.zongheng.com/showchapter/221579.html'):
d.start()
**TIPS**

* After ``d = Downloader()``, ``d.get_info()`` returns the supported sites info.
* ``d.set_url(url)`` returns whether the url is a valid chapter index page.
* You can call ``d.get_info()`` at any time to check the state of ``d``.
* When the download finishes, a ``$novel_name$.zip`` file is created in your current path; the default zip method is ``zipfile.ZIP_DEFLATED``.
**Just for educational purpose, take care of yourself.**
# AISTLAB novel grab
> novel grab crawler module using python3 and lxml
>
> multiprocesssing with multithread version
>
> winxos, AISTLAB Since 2017-02-19
## INSTALL:
``` pip3 install aistlab_novel_grab ```
## 1. USAGE:
RUN COMMAND IN CONSOLE:
```novel_grab http://the_url_of_novel_chapters_page```
EXAMPLE:
```novel_grab http://book.zongheng.com/showchapter/654086.html```
> **SUPPORTED SITES:**
* http://book.zongheng.com
* http://www.aoyuge.com
* http://www.quanshu.net
## 2. USAGE AS PYTHON MODULE:
``` python
from novel_grab.novel_grab import Downloader
d = Downloader()
print(d.get_info())
if d.set_url('http://book.zongheng.com/showchapter/221579.html'):
d.start()
```
>**TIPS**
* After `d = Downloader()`, `d.get_info()` returns the supported sites info.
* `d.set_url(url)` returns whether the url is a valid chapter index page.
* You can call `d.get_info()` at any time to check the state of `d` (see the sketch below).
* When the download finishes, a `$novel_name$.zip` file is created in your current path; the default zip method is `zipfile.ZIP_DEFLATED`.
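
A minimal progress-polling sketch (the `percent` and `file_name` keys are the ones exposed by `get_info()`; the URL is the example from above):

```python
import time
from novel_grab.novel_grab import Downloader

d = Downloader()
if d.set_url('http://book.zongheng.com/showchapter/221579.html'):
    d.start()                               # runs the download in a background thread
    while d.get_info()["percent"] < 100:    # updated while the download runs
        time.sleep(1)
    print("saved to", d.get_info()["file_name"])
```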
**Just for educational purpose, take care of yourself.**
import urllib.request
import urllib.error
from urllib.parse import urlsplit
from lxml import etree
import json
from multiprocessing import Pool
import multiprocessing
from time import perf_counter as clock  # time.clock() was removed in Python 3.8
import zipfile
import pkgutil  # pkgutil.get_data is required to read data bundled inside an egg package
from multiprocessing.pool import ThreadPool
from threading import Thread
import itertools
import sys
ENABLE_DEBUG_OUTPUT = True
OPEN_MULTI_THREAD = True
THREAD_NUMS = 10
def m_print(s, end="\n"):
if ENABLE_DEBUG_OUTPUT:
print(s, end=end)
def list_1d_to_2d(li, col):
row = len(li) // col
ans = [li[col * i: col * (i + 1)] for i in range(row)]
if li[col * row:]:
ans.append(li[col * row:])
return ans
def list_2d_to_1d(li):
return list(itertools.chain.from_iterable(li))
def extract_data(selector, xpath):
return selector.xpath(xpath)
class Downloader:
def __init__(self):
self.items = {} # must not assign outside
self.site_args = {}
self.info = {"percent": 0}
try:
self.sites_config = json.loads(pkgutil.get_data("novel_grab", 'grab_config.json').decode('utf-8'))
except IOError as e:
m_print("[error] %s" % e)
exit()
supported_sites = []
for n in self.sites_config["sites"]:
supported_sites.append(n["site"])
self.info["supported_sites"] = supported_sites
def set_url(self, url):
if not url.startswith("http"):
url = "http://" + url
if self.get_site_args(url):
if self.get_novel_info(url):
return True
return False
def get_site_args(self, url_entry):
rule_id = -1
server_url = "{0.scheme}://{0.netloc}/".format(urlsplit(url_entry))
for i, info in enumerate(self.sites_config["sites"]):
if server_url == info["site"]:
rule_id = i
m_print("[debug] match config %s with rule %s" % (server_url, rule_id))
break
if rule_id < 0:
print("[debug] 该网站暂时不支持")
return False
self.site_args = self.sites_config["sites"][rule_id]
if url_entry.endswith("/"): # for some relative href site
server_url = url_entry
self.items["server_url"] = server_url
return True
def get_content(self, url, try_times=10):
try:
f = urllib.request.urlopen(url)
return etree.HTML(f.read().decode(self.site_args["charset"]))
except UnicodeDecodeError as ude:
m_print("[error] decode error %s" % url)
m_print("[debug] info %s" % ude)
        except (urllib.error.URLError, TimeoutError):  # catch access errors, usually a timeout
m_print("[warning] %d retry %s" % (try_times, url))
# m_print(traceback.format_exc())
try_times -= 1
if try_times > 0:
return self.get_content(url, try_times)
return None
def get_chapter(self, url):
try:
s = self.get_content(url)
if s is None:
return
c = extract_data(s, self.site_args["chapter_content"])[0]
# raw elements of div filter none text element
# support two type, div//p/text div/text
raw_txt = [x.xpath("text()") for x in c.xpath(self.site_args["chapter_content"])]
if len(raw_txt) > 1:
raw_txt = [x[0] for x in raw_txt]
else:
raw_txt = raw_txt[0]
# remove some strange blank.
data = "\n".join([t.strip() for t in raw_txt])
# data = data.replace('\xa0', '')
for x in self.site_args["replace"]:
for src, des in x.items():
data = data.replace(src, des)
        except TimeoutError:  # catch access errors, usually a timeout
m_print("[error] %s" % url)
return None
return data
def get_novel_info(self, url_entry):
html = self.get_content(url_entry)
self.items["title"] = extract_data(html, self.site_args["title"])
if not self.items["title"]: # []
m_print("[error] grab title pattern can't match.")
return False
self.items["title"] = self.items["title"][0].xpath("string(.)")
self.items["author"] = extract_data(html, self.site_args["author"])
if not self.items["author"]: # []
m_print("[error] grab author pattern can't match.")
return False
self.items["author"] = self.items["author"][0].xpath("string(.)")
chapter_name = extract_data(html, self.site_args["chapter_name"])
if not chapter_name: # []
m_print("[error] grab chapter links pattern can't match.")
return False
chapter_href = extract_data(html, self.site_args["chapter_href"])
if not chapter_href: # []
m_print("[error] grab chapter links pattern can't match.")
return False
if not str(chapter_href[0]).startswith("http"): # not absolute link
chapter_href = [self.items["server_url"] + h for h in chapter_href]
m_print("[debug] novel_info %s downloaded." % url_entry)
m_print("[debug] title:%s" % self.items["title"])
m_print("[debug] author:%s" % self.items["author"])
m_print("[debug] chapters:%d" % len(chapter_name))
self.info["novel_name"] = "%s %s" % (self.items["title"], self.items["author"])
self.info["file_name"] = self.info["novel_name"] + ".zip"
self.items["chapters"] = zip(chapter_href, chapter_name)
return True
def crawler(self, chapter_info):
h, c = chapter_info
p = self.get_chapter(h)
if p is None:
m_print("[error] downloaded %s failed. %s" % (c, h))
p = "download error, failed at %s" % h
# else:
# m_print("[debug] downloaded %s" % c)
return c, p
def multi_thread_do_job(self, l, size=THREAD_NUMS):
tp = ThreadPool(size)
results = tp.map(self.crawler, l)
tp.close()
tp.join()
return results
def run(self):
"""
usage:
from novel_grab import novel_grab
novel_grab.download('the url or the novel all chapter page')
if the site supported, then will download all the content and create a zip file.
if you wanna make it support other site, let me know
have fun
winxos 2017-04-02
just for educational purpose.
"""
with Pool(processes=multiprocessing.cpu_count()) as pool:
st = clock()
m_print("[debug] downloading: %s" % self.items["title"])
# results = pool.map(self.crawler, chapters)
tasks = []
li = list(self.items["chapters"])
fun = self.crawler
if OPEN_MULTI_THREAD:
li = list_1d_to_2d(li, THREAD_NUMS)
fun = self.multi_thread_do_job
for i, d in enumerate(li):
tasks.append(pool.apply_async(fun, args=(d,)))
pool.close()
results = []
for i, r in enumerate(tasks):
results.append(r.get())
self.info["percent"] = i * 100 / len(tasks)
if i % multiprocessing.cpu_count() == 0:
m_print("\r[debug] downloading progress %.2f%%" % (self.info["percent"]), end="")
m_print('\r[debug] download done. used:%.2f s' % (clock() - st))
if OPEN_MULTI_THREAD:
results = list_2d_to_1d(results)
self.create_zip_file(results) # , method=zipfile.ZIP_LZMA)#ZIP_LZMA is slow more than deflated
m_print('[debug] all done.')
def start(self):
t = Thread(target=self.run)
t.start()
def get_info(self):
return self.info
# zip the file, ZIP_LZMA use lot of memory,
def create_zip_file(self, results, method=zipfile.ZIP_DEFLATED):
m_print('[debug] saving...')
st = clock()
raw_file_name = self.info["novel_name"] + ".txt"
novel_data = "%s\n%s\n\n" % (self.items["title"], self.items["author"])
novel_data += "\n".join("%s\n%s\n" % r for r in results) # fast than normal for
print("[debug] format data used:%.2f s" % (clock() - st))
zf = zipfile.ZipFile(self.info["file_name"], 'w', method) # zipfile.ZIP_LZMA
zf.writestr(raw_file_name, novel_data) # add memory file,very fast than save to disk
zf.close()
print("[debug] zip data used:%.2f s" % (clock() - st))
self.info["percent"] = 100
m_print('[debug] saved to [%s]' % self.info["file_name"])
def test():
d = Downloader()
print(d.get_info())
if d.set_url('http://book.zongheng.com/showchapter/510426.html'):
d.start()
# d.download('http://www.quanshu.net/book/67/67604/')
# d.download('http://book.zongheng.com/showchapter/390021.html')
# d.download('http://www.aoyuge.com/14/14743/index.html')
# download('http://www.quanshu.net/book/38/38215/')
'''
usage:
d = Downloader()
if d.set_url('http://book.zongheng.com/showchapter/221579.html'):
d.start()
'''
def download():
args = sys.argv[1:]
if len(args) > 0:
print("downloading:%s" % args[0])
d = Downloader()
if d.set_url(args[0]):
d.start()
else:
print("[%s] is not a valid enter point of novel chapters.")
else:
print("USAGE:\n\tnovel_download http://the_url_of_novel_chapters_page")
if __name__ == '__main__':
    download()
<p align="center"><a href=""><img alt="AISandbox" src="public/assets/editor.svg" width="60%"/></a></p>
<p align="center"><a href="aisandbox.app">aisandbox.app</a></p>
<hr>
<p align="center">( In Development, pre-alpha ) Something like Figma, but for designing AI systems.</p>
<p align="center">
<a href="https://github.com/Shubhamai/AISandbox/blob/master/LICENSE">
<img alt="Excalidraw is released under the MIT license." src="https://img.shields.io/badge/license-MIT-blue.svg" />
</a>
</p>
<hr>
## About
AISandbox is a node-based editor that allows creating an architecture of multiple models with inputs and outputs to perform a range of tasks. Think of Figma but for designing AI systems graphically.
Note that it is still in development (pre-alpha).
## Documentation
To run the graph as an API, the request code is generated programmatically in the UI; here is a sample:
```py
import requests
import json
payload = json.dumps(
{"data": [{"id": "TextInputNode-1", "data": {"text": "Hello World"}}]}
)
headers = {
"Content-Type": "application/json",
"Authorization": "YOUR_API_KEY",
"Project": "YOUR_PROJECT_ID",
}
response = requests.request("POST", "https://aisandbox.app/api/v1/execute", headers=headers, data=payload)
```
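
The returned `response` is a regular `requests` response object, so the result can be inspected as usual (the exact response schema is not documented here):

```py
print(response.status_code)
print(response.json())
```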
## Tech Stack
### Front end
- [Nextjs 13](https://nextjs.org/docs) as the web framework.
- [shadcn ui](https://ui.shadcn.com/) for the UI components.
- [reactflow](https://reactflow.dev/) for the node editor.
- [Lucide](https://lucide.dev/) for icons.
- [zustand](https://zustand-demo.pmnd.rs/) for state management.
#### Backend
- [Supabase](https://supabase.com/) for user authentication and serves as the main database.
- [Stripe](https://stripe.com/) for Payments.
- [OpenAI](https://openai.com/) and [Replicate](https://replicate.com/) to run most of the models.
### Infrastructure
- The site is deployed on [cloudflare](https://www.cloudflare.com/).
## Self Hosting
**(In progress)**
To self-host this application (at least parts of it), follow these steps:
- Fill in the API keys in `.env.example`, then rename it to `.env`.
- Create a Supabase account and create the DBs with the types provided in [types_db.ts](./types_db.ts).
- Run `pnpm dev`.
## Acknowledgements
- Thanks to [Resend](https://resend.com/home), [Skiff](https://skiff.com/), [Supabase](https://supabase.com/) and [Figma](https://figma.com/) for side UI, design and landing page inspirations.
- Thanks to [Sniffnet](https://github.com/GyulyVGC/sniffnet/), [nextjs-subscription-payments](https://github.com/vercel/nextjs-subscription-payments) for readme, code structure.
# AISimulation
## Installation
Runs on `python==3.10`; see `requirements.txt` for the remaining dependencies.
## Demo
See `demo.py` for a full example.
1. Instantiate an Experiment from the given `json` config file
``` python
exp = Experiment.load_exp(file="test/files4test/expe_config.json",
output_dir="experiments")
```
2. 实例化`actions`, 调用`get_action`
```python
def get_action(exp):
instruct_action = InstructAction(expe=exp)
probe_action = ProbeAction(expe=exp)
reflect_action = ReflectAction(expe=exp)
finetune_action = FinetuneAction(expe=exp)
return instruct_action, probe_action, reflect_action, finetune_action
```
3. Compose the atomic operations in any order you need; the probe action below illustrates the usage
```python
result = probe_action.probe(agent_id=1,
input='Do you like chinese food ?',
prompt="Your name is {}\n Your profile_list: {}. Now I will interview you. \n{}")
```
## Directory structure
```shell
aisimulation/
├── demo.py                     // usage example
├── README.md
├── requirements.txt
├── scripts_TODO                // folder for scripts
├── AISimuToolKit
│   ├── exp
│   │   ├── actions             // atomic action definitions
│   │   │   ├── base_action.py
│   │   │   ├── __init__.py
│   │   │   ├── instruct_action.py
│   │   │   ├── probe_action.py
│   │   │   ├── reflect_action.py
│   │   │   ├── rs.py
│   │   │   ├── setup.py
│   │   │   └── think.py
│   │   ├── agents              // agents and all of their memory
│   │   │   ├── __init__.py
│   │   │   ├── agent.py
│   │   │   └── memory.py
│   │   ├── experiment.py       // experiment class; one instance manages the interaction with the platform
│   │   └── __init__.py
│   ├── __init__.py
│   ├── model                   // LLMs supported by the platform
│   │   ├── __init__.py
│   │   ├── model.py            // LLM API wrappers
│   │   └── register.py         // registers each LLM so it can be selected via the config
│   ├── store
│   │   ├── __init__.py
│   │   └── text
│   │       └── logger.py
│   └── utils
│       ├── __init__.py
│       └── utils.py
├── temp                        // currently unused
│   ├── api_deprecated.py
│   ├── model_api.py
│   └── rs.py
└── test
    └── files4test              // example config
        └── expe_config.json
```
Each module is described in detail below.
### Model
The `AISimuToolKit/utils/model_api.py` file. A model can be defined for every agent; currently the `gpt-3.5` API interaction (parts of the returned results still need testing) and LLaMA fine-tuning (which also needs further testing) are supported. If you need an additional custom model, inherit from `ApiBase`
and then add it to the `ModelNameDict` dictionary. The main interaction logic is:
```python
def chat(self, query: str, config: dict, *args, **kwargs):
message = [
{"role": "user", "content": content}
]
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message,
)
return completion.choices[0].text
```
When an experiment starts, one model is built for every agent and stored in the `models` dictionary of the `Experiment`, with the agent id as key and the model instance as value. During each interaction, the corresponding model can be fetched from the dictionary by agent id and its `chat` (or other) methods can be called.
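
For example (a sketch following the description above, using the `exp` object created in the demo; error handling omitted):

```python
model = exp.models[agent_id]                    # model instance built for this agent
reply = model.chat("How are you today?", config={})
```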
<!-- ### External_ToolKit
`AISimuToolKit/utils/model_api.py`.
The idea is that all interaction with external tools (e.g. a recommender system) goes through an API; this part is still to be added. Each toolkit can target specific agents: the corresponding JSON config contains a `target`
field. That logic is also still to be added. -->
### Actions
The `AISimuToolKit/exp/actions` folder. Any agent interaction (with a model or with an external tool) is implemented as an action; users can combine the predefined and custom actions to carry out an experiment. Every atomic operation corresponds to one action, and each action provides a `run` method so that several agents can be operated on at once.
Currently supported atomic actions:
* `ActionBase`: base action; every action must inherit from this class and implement the `run` method (this base method is never called during an actual run)
* `InstructAction`: sends instructions to an agent; it does not interact with the LLM, it only stores the input in the memory of the corresponding agent
  * parameters: `[{"agent_id":agent_id,"question":question,"answer":answer}]`; the operation is applied to every listed agent
* `ProbeAction`: a probe used to evaluate an agent during the experiment; the question is sent to the LLM
  * parameters: `[{"agent_id":agent_id,"input":input, "prompt": prompt}]`
* `ReflectionAction`: reflection; uses the dialogue information in the current memory stream to let the LLM update its own profile
  * parameters: `[{"num":num,"input":bool, "output": bool, "prompt": prompt}]`
* `FinetuneAction`: fine-tuning; influences the model parameters through dialogue data
  * parameters: `[{"agent_id":agent_id,"num":num}]`; the fine-tuning hyperparameters are set in the experiment config under `agent_list/model_settings/config`
<!-- * `RS`: a simple interaction logic, mainly demonstrating how to access the relevant variables -->
To define a custom action, inherit from `ActionBase` and register it in `AISimuToolKit.model.register.ModelNameDict`, either via monkey patching or simply `ModelNameDict['key'] = CustomAction`; a sketch is given below.
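
A minimal sketch of a custom action (the import path and the exact `run()` signature are assumptions based on the directory layout above, not a documented API):

```python
from AISimuToolKit.exp.actions.base_action import ActionBase

class CustomAction(ActionBase):
    def run(self, agent_id: int, *args, **kwargs):
        # the experiment is available via self.expe, e.g. self.expe.agents / self.expe.models
        ...

# register it so it can be referenced from the config (hypothetical key)
# ModelNameDict['custom'] = CustomAction
```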
<!-- During experiment initialisation, every file in this directory is scanned and each class inheriting from `ActionBase` is stored in the `actions` dictionary, with the class name as key and an instance as value. The corresponding `run` method can then be called during the experiment. -->
### Experiment
The `AISimuToolKit/exp/experiment.py` file. It mainly stores the variables related to an experiment. `BaseAction` keeps a reference to `expe`, which can be accessed directly when needed.
```python
class Experiment:
def __init__(self, agents: List[Agent], models: List[ApiBase], toolkits: List[ExternalToolkitApi], config: json):
self.agents = agents
self.models = models
# self.toolkits = toolkits
self.config = config
```
### Logger
The `AISimuToolKit/store/text/logger.py` file.
1. Implements a logging utility
2. Implements the singleton pattern (thread-safe and process-safe)
3. Logs have five levels (debug, info, warning, error, critical); the `log_console` and `log_file` parameters control whether messages are written to the console and to a file.
4. Additionally implements a `log` method that is equivalent to `warning`
5. Implements a `history` function that first calls `info` and then stores the entry in the `history` file.
6. The `log` file stores all log messages (e.g. debug output); the `history` file stores the record of atomic operations performed during the experiment. Choose accordingly when calling.
### Memory
The `AISimuToolKit/exp/agents/memory.py` file.
Every agent has a memory; entries are stored as JSON with an auto-incremented id, the interactant, both sides of the dialogue, and a timestamp. A memory item looks like:
```python
memory_item = {
"id": self.memory_id,
"interactant": interactant,
"question": question,
"answer": answer,
"time": str(current_time)
}
```
Lookup by `id`, by `interactant` and by most recent access is implemented. Memories are kept in RAM and are also continuously appended to a file; `export_memory` can be called to export (overwrite) the whole memory.
### Agent
```python
class Agent:
"""
    Stores the information of an agent
"""
def __init__(self, agent_id: int,
name: str, profile: str, role: str, agent_path: str,
config: dict):
self.agent_id = agent_id
self.name = name
self.profile = profile
self.role = role
self.config = config
self.memory = Memory(memory_path=os.path.join(agent_path, "memory.jsonl"))
```
## 备注
1. 对于使用模型为LLaMA且不涉及基于用户的协同过滤, 建议一个agent跑完再下一个, 这样可以减少模型load的时间
2. 不一定所有方法和变量都测试到了,如果有bug可以随时联系。对于建议修改调整的部分,可以在文档中增加TODO。
另外,chatgpt接口返回的`finish_reason`的情况有待进一步测试,目前没找到除了stop以外的例子。
3. prompt需要精心设计, 语言模型会拒绝回答
4. 本来的想法是reflect返回的结果直接存到profile, 但是现在LLM放回的结果不一定好, 就把这一句暂时抽了出来
5. finetune操作最好在数据集两位数以上进行, 不然无法拆分出valsets
## TODO
1. 不同语言模型的原子操作调用的接口不太一样
| AISimuToolKit | /AISimuToolKit-0.1.4.tar.gz/AISimuToolKit-0.1.4/README.md | README.md |
import math
from scipy.linalg import logm, expm
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.ticker as ticker
def rotx(ang):
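    """Rotation matrix for a rotation of ang degrees about the x-axis."""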
a = math.radians(ang)
r = np.array([[1.0, 0.0, 0.0],
[0.0, math.cos(a), -math.sin(a)],
[0.0, math.sin(a), math.cos(a)]])
return r
def roty(ang):
a = math.radians(ang)
r = np.array([[math.cos(a), 0.0, math.sin(a)],
[0.0, 1.0, 0.0],
[-math.sin(a),0.0, math.cos(a)]])
return r
def rotz(ang):
a = math.radians(ang)
r = np.array([[math.cos(a), -math.sin(a), 0.0],
[math.sin(a), math.cos(a), 0.0],
[0.0, 0.0, 1.0]])
return r
def rpy2r(gamma,beta,alpha):
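    """Roll-pitch-yaw (X-Y-Z fixed angles, in degrees) to rotation matrix: Rz(alpha) @ Ry(beta) @ Rx(gamma)."""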
r = rotz(alpha).dot(roty(beta).dot(rotx(gamma)))
return r
def r2rpy(r):
beta = math.atan2(-r[2][0], math.sqrt(r[0][0]**2 + r[1][0]**2))
if (math.cos(beta) != 0.0):
alpha = math.atan2(r[1][0]/math.cos(beta), r[0][0]/math.cos(beta))
gamma = math.atan2(r[2][1]/math.cos(beta), r[2][2]/math.cos(beta))
elif beta == math.pi/2:
alpha = 0.0
gamma = math.atan2(r[0][1],r[1][1])
else:
alpha = 0.0
gamma = -math.atan2(r[0][1],r[1][1])
gamma = gamma*180/math.pi
alpha = alpha*180/math.pi
beta = beta*180/math.pi
return [gamma, beta, alpha]
def euler2r(alpha,beta,gamma):
r = rotz(alpha).dot(roty(beta).dot(rotz(gamma)))
return r
def r2euler(r):
beta = math.atan2(math.sqrt(r[2][0]**2 + r[2][1]**2),r[2][2])
if (math.sin(beta) != 0.0):
alpha = math.atan2(r[1][2]/math.sin(beta), r[0][2]/math.sin(beta))
gamma = math.atan2(r[2][1]/math.sin(beta), r[2][0]/math.sin(beta))
elif beta == 0.0:
alpha = 0.0
gamma = math.atan2(-r[0][1],r[0][0])
else:
alpha = 0.0
gamma = math.atan2(r[0][1],-r[0][0])
gamma = gamma*180/math.pi
alpha = alpha*180/math.pi
beta = beta*180/math.pi
return [gamma, beta, alpha]
# added 1400/12/12
def angvec2r(theta,v):
a = math.radians(theta)
st = math.sin(a)
ct = math.cos(a)
vt = 1 - ct
r = np.array(
[[v[0]*v[0]*vt+ct, v[0]*v[1]*vt-v[2]*st, v[0]*v[2]*vt+v[1]*st],
[v[0]*v[1]*vt+v[2]*st, v[1]*v[1]*vt+ct, v[1]*v[2]*vt-v[0]*st],
[v[0]*v[2]*vt-v[1]*st, v[1]*v[2]*vt+v[0]*st, v[2]*v[2]*vt+ct],])
return r
# added 1400/12/12
def r2angvec(r):
a = (r[0][0]+r[1][1]+r[2][2]-1)/2.0
ang = math.acos(a)
theta = math.acos(a) * 180/math.pi
v = [r[2][1]-r[1][2], r[0][2]-r[2][0], r[1][0]-r[0][1]]
v = np.multiply(v,0.5/math.sin(ang))
return [theta,v]
# added 1400/12/12
def r2angvec2(r):
a = (r[0][0]+r[1][1]+r[2][2]-1)/2.0
ang = math.acos(a)
theta = math.acos(a) * 180/math.pi
v = [r[2][1]-r[1][2], r[0][2]-r[2][0], r[1][0]-r[0][1]]
v = np.multiply(v,0.5/math.sin(ang))
return [theta,v]
#make a skew matrix
# added 1400/12/12
def skew(k):
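    """Skew-symmetric (cross-product) matrix of the 3-vector k."""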
r = np.array([[0, -k[2], k[1]],
[k[2], 0, -k[0]],
[-k[1], k[0], 0]])
return r
# drawing a rotation matrix
# added 1400/12/12
def plot(r):
fig = plt.figure(1)
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is deprecated/removed in newer matplotlib
ax.plot3D([0,r[0][0]],
[0,r[1][0]],
[0,r[2][0]],'r')
ax.plot3D([0,r[0][1]],
[0,r[1][1]],
[0,r[2][1]],'g')
ax.plot3D([0,r[0][2]],
[0,r[1][2]],
[0,r[2][2]],'b')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
return
class SerialLink:
def __init__(self, name, links):
self.name = name
self.links = links
print(self.name)
print('--------------------------------------------')
print('i\talpha\ta\td\ttheta\ttype')
print('--------------------------------------------')
for i in range(np.size(self.links,0)):
print(i+1, end='\t')
for j in range(np.size(self.links,1)):
print(round(self.links[i][j],2), end='\t')
print('\n')
print('------------------------------------------')
#Friday 1402/2/8
def toDeg(self,radVal):
return radVal*180.0/np.pi
#create T i w.r.t i-1
def makeT(self,DH):
T = np.array([[math.cos(DH[3]), -math.sin(DH[3]), 0, DH[1]],
[math.sin(DH[3])*math.cos(DH[0]), math.cos(DH[3])*math.cos(DH[0]), -math.sin(DH[0]), -DH[2]*math.sin(DH[0])],
[math.sin(DH[3])*math.sin(DH[0]), math.cos(DH[3])*math.sin(DH[0]), math.cos(DH[0]), DH[2]*math.cos(DH[0])],
[0, 0, 0, 1]])
return T
def fkinCalc(self):
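        """Forward kinematics: multiply the link transforms in order and return the resulting 4x4 homogeneous matrix."""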
TT = np.eye(4)
for l in range(np.size(self.links,0)):
T = self.makeT(self.links[l])
TT = TT.dot(T)
return TT
def fkin(self,joints):
noOfJoints = np.size(joints)
if noOfJoints != np.size(self.links,0):
print('Number of specified joints is not correct.')
return
for i in range(np.size(joints)):
if self.links[i][4] == 0:
self.links[i][3] = joints[i]
else:
self.links[i][2] = joints[i]
T = self.fkinCalc()
return T
def plot(self):
fig = plt.figure(1)
ax = fig.add_subplot(projection='3d')
TT = np.eye(4)
for i in range(np.size(self.links,0)):
To = TT
TT = TT.dot(self.makeT(self.links[i]))
ax.plot3D([To[0][3],TT[0][3]],[To[1][3],TT[1][3]],[To[2][3],TT[2][3]])
ax.plot3D([TT[0][3],TT[0][3]+2*TT[0][0]],
[TT[1][3],TT[1][3]+2*TT[1][0]],
[TT[2][3],TT[2][3]+2*TT[2][0]])
ax.plot3D([TT[0][3],TT[0][3]+2*TT[0][1]],
[TT[1][3],TT[1][3]+2*TT[1][1]],
[TT[2][3],TT[2][3]+2*TT[2][1]],'g')
ax.plot3D([TT[0][3],TT[0][3]+2*TT[0][2]],
[TT[1][3],TT[1][3]+2*TT[1][2]],
[TT[2][3],TT[2][3]+2*TT[2][2]],'b')
minAll = min(TT[0][3],TT[1][3],TT[2][3])
maxAll = max(TT[0][3],TT[1][3],TT[2][3])
ax.plot3D([0, 0],[0,0],[0,minAll-1],'y')
ax.set_xlim(minAll-1, maxAll+1)
ax.set_ylim(minAll-1, maxAll+1)
ax.set_zlim(minAll-1, maxAll+1)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.title(self.name)
plt.show()
return
class Puma560(SerialLink):
def __init__(self,name):
self.name = name
a2 = 43.2
a3 = 0 #10
d3 = 0 #23.3
d4 = 43.2
self.links = [[0.0, 0.0, 0.0, 0.0, 0],
[-np.pi/2,0.0, 0.0, 0.0, 0],
[0.0, a2, d3, 0.0, 0],
[-np.pi/2,a3, d4, 0.0, 0],
[np.pi/2, 0.0, 0.0, 0.0, 0],
[-np.pi/2,0.0, 0.0, 0.0, 0]]
SerialLink.__init__(self,self.name,self.links)
#scara robot Friday 1402/2/8
class SCARA(SerialLink):
def __init__(self,name,l1,l2):
self.name = name
self.l1 = l1
self.l2 = l2
self.links = [[0.0, 0.0, 0.0, 0.0, 0],
[0.0, l1, 0.0, 0.0, 0],
[0.0, l2, 0.0, 0.0, 0],
[np.pi, 0.0, 0.0, 0.0, 1]]
SerialLink.__init__(self,self.name,self.links)
def invKin(self,T,type='r'):
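        """Closed-form inverse kinematics for the SCARA arm; returns both (elbow-up / elbow-down) joint solutions for the target pose T."""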
results = []
theta123 = math.atan2(T[1][0],T[0][0])
d4 = -T[2][3]
x = T[0][3]
y = T[1][3]
c2 = (x*x+y*y-self.l1*self.l1-self.l2*self.l2)/(2*self.l1*self.l2)
if (c2 < -1 or c2 > 1):
print('invalid location')
return []
s2 = math.sqrt(1-c2*c2)
theta2 = math.atan2(s2,c2)
k1 = self.l1 + self.l2*c2
k2 = self.l2*s2
theta1 = math.atan2(y,x) - math.atan2(k2,k1)
theta3 = theta123 - theta1 - theta2
if type == 'r': #use radians
joints = [theta1,theta2,theta3,d4]
elif type == 'd': #use degrees
joints = [self.toDeg(theta1),self.toDeg(theta2),self.toDeg(theta3),d4]
results.append(joints)
s2 = -s2
theta2 = math.atan2(s2,c2)
k1 = self.l1 + self.l2*c2
k2 = self.l2*s2
theta1 = math.atan2(y,x) - math.atan2(k2,k1)
theta3 = theta123 - theta1 - theta2
if type == 'r': #use radians
joints = [theta1,theta2,theta3,d4]
elif type == 'd': #use degrees
joints = [self.toDeg(theta1),self.toDeg(theta2),self.toDeg(theta3),d4]
results.append(joints)
        return results
from datetime import datetime
from lxml.etree import _Element
from settings import NAMESPACES
def get_feature_type(timeslices: list) -> str:
"""
Returns the type of AIXM feature from the most recent timeslice
Args:
timeslices (list): A list of one or more AIXM timeslice
Returns:
feature_type (str): The AIXM feature type.
"""
try:
feature_type = timeslices[-1].find('.', NAMESPACES).tag
feature_type = feature_type.split('}')[-1].split('T')[0]
except IndexError:
feature_type = "Unknown"
return feature_type
def parse_timeslice(subroot: _Element) -> list:
"""Looks at the timeslices contained within the feature and arranges them in time order (oldest to newest).
Returns:
timeslices (list):
A list of timeslices in chronological order.
"""
# Get a list of timeslices
try:
timeslices = subroot.findall('.//aixm:timeSlice', NAMESPACES)
except IndexError:
timeslices = None
# Don't bother to sort if there is only one timeslice
if len(timeslices) > 1:
try:
timeslices.sort(key=lambda x: datetime.strptime(
x.find('.//{http://www.aixm.aero/schema/5.1}versionBegin').text.split('T')[0],
"%Y-%m-%d"))
except AttributeError:
pass
return timeslices
def convert_elevation(z_value: str, current_uom: str) -> float:
if z_value == 'GND':
current_uom = 'M'
z_value = 0.0
if z_value == 'UNL':
current_uom = 'M'
z_value = 18288.00 # 60,000 ft in metres
if current_uom == 'FL':
current_uom = 'M'
z_value = (float(z_value) * 100) * .3048 # 1 ft in metres
return z_value, current_uom
def altitude_mode(aixm_dict):
altitude_mode = 'absolute'
if aixm_dict['upper_layer_reference'] == 'SFC':
altitude_mode = 'relativetoground'
return altitude_mode
def switch_radius_uom(radius_uom):
if radius_uom == '[nmi_i]':
radius_uom = 'NM'
return radius_uom
def determine_geometry_type(aixm_feature_dict):
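    """Infer the geometry type ('point', 'cylinder', 'linestring', 'polygon' or 'polyhedron') from the feature dictionary."""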
geometry_type = None
if aixm_feature_dict['type'] == 'RouteSegment':
geometry_type = 'LineString'
elif aixm_feature_dict['type'] == 'VerticalStructure':
if len(aixm_feature_dict['coordinates']) > 1:
aixm_feature_dict['lower_layer'] = 0.0
aixm_feature_dict['upper_layer'] = aixm_feature_dict['elevation']
geometry_type = 'Polygon'
else:
geometry_type = 'point'
elif len(aixm_feature_dict["coordinates"]) == 1:
if 'radius=' in aixm_feature_dict['coordinates'][0]:
if aixm_feature_dict["upper_layer"]:
geometry_type = 'cylinder'
else:
geometry_type = 'point'
elif len(aixm_feature_dict["coordinates"]) == 2:
for coordinate in aixm_feature_dict["coordinates"]:
if 'start=' in coordinate:
return 'polyhedron'
if geometry_type is None:
return 'linestring'
elif len(aixm_feature_dict["coordinates"]) > 2:
if aixm_feature_dict["upper_layer"]:
geometry_type = 'polyhedron'
else:
geometry_type = 'polygon'
    return geometry_type
from typing import Union
from lxml import etree
from pyproj import Geod
import util as util
from settings import NAMESPACES
class SinglePointAixm:
"""
A base class for all AIXM features who's representative geographic information is held in a single point.
Examples -
AirportHeliport - Geographic information is a single point (ARP)
DesignatedPoint - A single geographic point
"""
    __slots__ = ['_root', '_timeslice']

    def __init__(self, root):
self._root = root
self._timeslice = util.parse_timeslice(self._root)
def get_first_value(self, xpath: str, **kwargs: etree.Element) -> str:
"""Returns the first matching text value found within the subtree which match the Xpath provided.
Args:
xpath (str): Valid Xpath string for the value to try and find.
**kwargs:
subtree(etree.Element): The subtree to search. Defaults to self.root if no value provided.
Returns:
value (str): String value of the tag found.
"""
subtree = kwargs.pop('subtree', self._root)
try:
value = subtree.find(xpath, namespaces=NAMESPACES).text
if value is None:
raise AttributeError
except AttributeError:
value = "Unknown"
return value
def get_first_value_attribute(self, xpath: str, **kwargs: Union[etree.Element, str]) -> Union[str, dict]:
"""
Args:
xpath (str): Valid Xpath string for the tag to try and find.
**kwargs:
subtree (etree.Element): The subtree to search. Defaults to self.root if no value provided.
attribute_string (str): The attribute to search for.
Returns:
attribute (Union[str, dict]): The string attribute if attribute_string is defined.
If not, returns the full dict.
"""
subtree = kwargs.pop('subtree', self._root)
attribute_string = kwargs.pop('attribute_string', None)
if attribute_string:
try:
element = subtree.find(xpath, namespaces=NAMESPACES)
attribute = element.attrib[attribute_string]
except AttributeError:
attribute = "Unknown"
else:
try:
element = subtree.find(xpath, namespaces=NAMESPACES)
attribute = dict(element.attrib)
except AttributeError:
attribute = "Unknown"
return attribute
def get_field_elevation(self):
elevation = self.get_first_value('.//aixm:fieldElevation')
elevation_uom = self.get_first_value_attribute('.//aixm:fieldElevation', attribute_string='uom')
if elevation == 'Unknown':
elevation = 0
elevation_uom = 'M'
return elevation, elevation_uom
def get_vertical_extent(self):
elevation = self.get_first_value('.//aixm:elevation')
elevation_uom = self.get_first_value_attribute('.//aixm:elevation', attribute_string='uom')
if elevation == 'Unknown':
elevation = 0
elevation_uom = 'M'
return elevation, elevation_uom
def get_crs(self):
"""
Parses the CRS from the AirspaceGeometryComponent parent tag and returns the CRS as a string.
Args:
self
Returns:
crs(str): A string of 'Anticlockwise' or 'Clockwise' depending upon the CRS
applied and the start and end angles
"""
crs = self._timeslice[-1].xpath(".//*[@srsName]", namespaces=NAMESPACES)[0]
split = crs.get("srsName").split(':')[-1]
if split == '4326':
crs = '4326'
elif split == 'CRS84':
crs = 'CRS84'
else:
raise TypeError(print('Only CRS84 and ESPG::4326 supported.'))
return crs
class MultiPointAixm(SinglePointAixm):
"""
Extends SinglePointAixm for use with features who's geographic representation is often represented by multiple
points which make up polygons etc.
Examples
Airspace
RouteSegment
"""
def __init__(self, root):
super().__init__(root)
def get_airspace_elevation(self):
lower_layer = self.get_first_value('.//aixm:theAirspaceVolume//aixm:lowerLimit')
lower_layer_uom = self.get_first_value_attribute('.//aixm:theAirspaceVolume//aixm:lowerLimit',
attribute_string='uom')
upper_layer = self.get_first_value('.//aixm:theAirspaceVolume//aixm:upperLimit')
upper_layer_uom = self.get_first_value_attribute('.//aixm:theAirspaceVolume//aixm:upperLimit',
attribute_string='uom')
if lower_layer == 'Unknown':
lower_layer = 0.0
lower_layer_uom = 'M'
if upper_layer == 'Unknown':
upper_layer = 0.0
upper_layer_uom = 'M'
return lower_layer, lower_layer_uom, upper_layer, upper_layer_uom
def get_coordinate_list(self, subroot):
"""
Parses the LXML etree._Element object and returns a list of coordinate strings.
Args:
subroot: LXML etree._Element object
Returns:
unpacked_gml(list[str]): A list of coordinate strings
"""
unpacked_gml = None
for location in subroot:
try:
unpacked_gml = self.unpack_gml(location)
except TypeError:
print('Coordinates can only be extracted from an LXML etree._Element object.')
for x in unpacked_gml:
x.strip("r'\'")
return unpacked_gml
def unpack_gml(self, location: etree.Element) -> list[str]:
"""
Args:
location(etree.Element): etree.Element containing specific aixm tags containing geographic information
kwargs(str): crs=None the CRS to be used for determining arc directions for ArcByCenterPoint.
Returns:
coordinate_string(str): A coordinate string
"""
coordinate_list = []
for x in self.extract_pos_and_poslist(location):
coordinate_list.append(x)
return coordinate_list
def extract_pos_and_poslist(self, location):
"""
Args:
self
Returns:
coordinate_string(str): A coordinate string
"""
for child in location.iterdescendants():
tag = child.tag.split('}')[-1]
if tag == 'GeodesicString' or tag == 'ElevatedPoint':
for x in self.unpack_geodesic_string(child):
yield x
elif tag == 'CircleByCenterPoint':
yield self.unpack_circle(child)
elif tag == 'ArcByCenterPoint':
yield self.unpack_arc(child)
def unpack_geodesic_string(self, location):
for child in location.iterdescendants():
tag = child.tag.split('}')[-1]
if tag == 'pos':
yield child.text
elif tag == 'posList':
for x in self.unpack_pos_list(child.text):
yield x
def unpack_pos_list(self, string_to_manipulate):
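        """Split a gml:posList string into individual 'lat lon' coordinate-pair strings."""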
split = string_to_manipulate.split(' ')
if len(split) > 1:
for i in range(len(split) - 1):
if i < len(split) and i % 2 == 0:
new_string = f'{split[i]} {split[i + 1]}'
yield new_string
else:
yield string_to_manipulate
def unpack_arc(self, location: etree.Element) -> str:
"""
Args:
location(etree.Element): etree.Element containing specific aixm tags containing geographic information
crs(str): CRS to be used for determining arc directions for ArcByCenterPoint.
Returns:
coordinate_string(str): A coordinate string
"""
centre = self.get_arc_centre_point(location).strip()
start_angle = self.get_first_value('.//gml:startAngle', subtree=location)
end_angle = self.get_first_value('.//gml:endAngle', subtree=location)
# Pyproj uses metres, we will have to convert for distance
radius = self.get_first_value('.//gml:radius', subtree=location)
radius_uom = self.get_first_value_attribute('.//gml:radius', subtree=location, attribute_string='uom')
conversion_dict = {'ft': 0.3048, 'NM': 1852, '[nmi_i]': 1852, 'mi': 1609.4, 'km': 1000}
if radius_uom != 'm':
radius = float(radius) * conversion_dict[radius_uom]
lat = centre.split(' ')[0]
lon = centre.split(' ')[1]
        start_coord = Geod(ellps='WGS84').fwd(float(lon), float(lat), float(start_angle), float(radius))  # pyproj expects numeric arguments
        end_coord = Geod(ellps='WGS84').fwd(float(lon), float(lat), float(end_angle), float(radius))
coordinate_string = f'start={round(start_coord[1], 5)} {round(start_coord[0], 5)},' \
f' end={round(end_coord[1], 5)} {round(end_coord[0], 5)}, centre={centre},' \
f' direction={self.determine_arc_direction(float(start_angle), float(end_angle))}'
return coordinate_string
def get_arc_centre_point(self, location):
centre = self.get_first_value('.//gml:pos', subtree=location)
# If none, check for gml:posList instead
if centre == 'Unknown':
centre = self.get_first_value('.//gml:posList', subtree=location)
return centre
def get_circle_centre_point(self, location):
centre = self.get_first_value('.//gml:pos', subtree=location)
# If none, check for gml:posList instead
if centre == 'Unknown':
centre = self.get_first_value('.//gml:posList', subtree=location)
return centre
def determine_arc_direction(self, start_angle: float, end_angle: float) -> str:
"""
Args:
start_angle(float): Start angle of the arc from it's centre point.
end_angle(float): End angle of the arc from it's centre point.
crs(str): The CRS being used by the AIXM feature.
Returns:
direction(str): Clockwise or Anticlockwise
"""
crs = self.get_crs()
if crs == '4326':
if start_angle < end_angle:
direction = 'clockwise'
else:
direction = 'anticlockwise'
elif crs == 'CRS84':
if start_angle < end_angle:
direction = 'anticlockwise'
else:
direction = 'clockwise'
else:
raise TypeError(print('Only ESPG::4326 and CRS84 are supported.'))
return direction
def unpack_linestring(self, location: etree.Element) -> str:
"""
Args:
location(etree.Element): etree.Element containing specific aixm tags containing geographic information
Returns:
coordinate_string(str): A coordinate string.
"""
coordinate_string = self.get_first_value('.//aixm:Point//gml:pos', subtree=location)
if coordinate_string == 'Unknown':
coordinate_string = self.get_first_value('.//aixm:Point//gml:posList', subtree=location)
return coordinate_string
def unpack_circle(self, location: etree.Element) -> str:
"""
Args:
location(etree.Element): etree.Element containing specific aixm tags containing geographic information
Returns:
coordinate_string(str): A coordinate string
"""
centre = self.get_circle_centre_point(location)
radius = self.get_first_value('.//gml:radius', subtree=location)
radius_uom = self.get_first_value_attribute('.//gml:radius', subtree=location, attribute_string='uom')
coordinate_string = f'{centre}, radius={radius}, radius_uom={radius_uom}'
return coordinate_string
def get_elevation(self):
lower_layer = self.get_first_value('.//aixm:theAirspaceVolume//aixm:lowerLimit')
lower_layer_uom = self.get_first_value_attribute('.//aixm:theAirspaceVolume//aixm:lowerLimit',
attribute_string='uom')
upper_layer = self.get_first_value('.//aixm:theAirspaceVolume//aixm:upperLimit')
upper_layer_uom = self.get_first_value_attribute('.//aixm:theAirspaceVolume//aixm:upperLimit',
attribute_string='uom')
if lower_layer == 'Unknown':
lower_layer = 0
lower_layer_uom = 'FT'
if upper_layer == 'Unknown':
upper_layer = 0
upper_layer_uom = 'FT'
        return lower_layer, lower_layer_uom, upper_layer, upper_layer_uom
from pathlib import Path
from kmlplus import kml
import util as util
from factory import AixmFeatureFactory
class AixmGeo:
__slots__ = ('aixm_file', 'output_path', 'file_name')
def __init__(self, aixm_file, output_path, file_name):
self.aixm_file = aixm_file
self.output_path = output_path
self.file_name = file_name
def build_kml(self):
kml_obj = kml.KmlPlus(output=self.output_path, file_name=self.file_name)
self.draw_features(kml_obj)
def draw_features(self, kml_obj):
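        """Iterate over the AIXM features, determine each one's geometry type and draw it into the KML object."""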
for aixm_feature_obj in AixmFeatureFactory(self.aixm_file):
aixm_feature_dict = aixm_feature_obj.get_geographic_information()
if aixm_feature_dict:
geometry_type = util.determine_geometry_type(aixm_feature_dict)
if geometry_type == 'cylinder':
self.draw_cylinder(aixm_feature_dict, kml_obj)
elif geometry_type == 'point':
if aixm_feature_dict['type'] == 'VerticalStructure':
self.draw_vertical_structure_point(aixm_feature_dict, kml_obj)
else:
kml_obj.point(aixm_feature_dict["coordinates"], fol=aixm_feature_dict['name'],
point_name=aixm_feature_dict['name'])
elif geometry_type == 'polyhedron':
self.draw_airspace(aixm_feature_dict, kml_obj)
print(aixm_feature_dict)
else:
pass
def draw_vertical_structure_point(self, aixm_feature_dict, kml_obj):
kml_obj.point(aixm_feature_dict["coordinates"], uom=aixm_feature_dict['elevation_uom'],
fol=aixm_feature_dict['name'], point_name=aixm_feature_dict['name'],
altitude_mode='relativeToGround', extrude=1)
def draw_airspace(self, aixm_feature_dict, kml_obj):
kml_obj.polyhedron(aixm_feature_dict["coordinates"],
aixm_feature_dict["coordinates"],
upper_layer=float(aixm_feature_dict['upper_layer']),
lower_layer=float(aixm_feature_dict['lower_layer']),
uom=aixm_feature_dict['lower_layer_uom'], fol=aixm_feature_dict['name'],
altitude_mode=util.altitude_mode(aixm_feature_dict))
def draw_cylinder(self, aixm_feature_dict, kml_obj):
coordinates = aixm_feature_dict['coordinates'][0].split(',')[0].strip()
radius = aixm_feature_dict['coordinates'][0].split(',')[1].split('=')[-1]
radius_uom = util.switch_radius_uom(aixm_feature_dict['coordinates'][0].split(',')[2].split('=')[-1])
lower_layer = aixm_feature_dict['lower_layer']
upper_layer = aixm_feature_dict['upper_layer']
kml_obj.cylinder(coordinates, float(radius),
radius_uom=radius_uom, lower_layer=float(lower_layer),
upper_layer=float(upper_layer),
fol=aixm_feature_dict['name'], lower_layer_uom=aixm_feature_dict['lower_layer_uom'],
upper_layer_uom=aixm_feature_dict['upper_layer_uom'],
altitude_mode=util.altitude_mode(aixm_feature_dict))
if __name__ == '__main__':
file_loc = Path().absolute().joinpath('..', Path('test_data/donlon.xml'))
output = Path().absolute()
    AixmGeo(file_loc, output, 'test_kml.kml').build_kml()
import util
from base import SinglePointAixm, MultiPointAixm
from interfaces import IAixmFeature
from settings import NAMESPACES
class AirportHeliport(SinglePointAixm, IAixmFeature):
def __init__(self, root):
super().__init__(root)
def get_geographic_information(self):
"""
Args:
self
Returns:
geo_dict(dict): A dictionary containing relevant information regarding the feature.
"""
elevation, elevation_uom = self.get_field_elevation()
if elevation_uom != 'M':
            elevation, elevation_uom = util.convert_elevation(elevation, elevation_uom)  # convert_elevation returns (value, uom)
geo_dict = {
'type': 'AirportHeliport',
'coordinates': [f"{self.get_first_value('.//aixm:ARP//gml:pos')} "
f"{elevation}"],
'elevation': elevation,
'elevation_uom': elevation_uom,
'name': f'{self.get_first_value(".//aixm:designator")} ({self.get_first_value(".//aixm:name")})',
}
return geo_dict
class NavaidComponent(SinglePointAixm, IAixmFeature):
def __init__(self, root):
super().__init__(root)
def get_geographic_information(self):
"""
Args:
self
Returns:
geo_dict(dict): A dictionary containing relevant information regarding the feature.
"""
        elevation, elevation_uom = self.get_vertical_extent()  # SinglePointAixm provides get_vertical_extent(); get_elevation() exists only on MultiPointAixm
if elevation_uom != 'M':
            elevation, elevation_uom = util.convert_elevation(elevation, elevation_uom)  # convert_elevation returns (value, uom)
geo_dict = {
'type': 'NavaidComponent',
'coordinates': [f"{self.get_first_value('.//aixm:location//gml:pos')}"
f" {elevation}"],
'elevation': elevation,
'elevation_uom': elevation_uom,
'name': f'{self.get_first_value(".//aixm:designator")}({self.get_first_value(".//aixm:name")})' \
f' {self.get_first_value(".//aixm:type")}'
}
return geo_dict
class DesignatedPoint(SinglePointAixm, IAixmFeature):
def __init__(self, root):
super().__init__(root)
def get_geographic_information(self):
"""
Args:
self
Returns:
geo_dict(dict): A dictionary containing relevant information regarding the feature.
"""
geo_dict = {
'type': 'DesignatedPoint',
'name': self.get_first_value('.//aixm:name'),
'coordinates': [self.get_first_value('.//aixm:location//gml:pos')]
}
return geo_dict
class RouteSegment(MultiPointAixm, IAixmFeature):
def __init__(self, root):
super().__init__(root)
def get_geographic_information(self) -> dict:
"""
Args:
self
Returns:
geo_dict(dict): A dictionary containing relevant information regarding the feature.
"""
coordinate_list = []
root = self._root.iterfind('.//aixm:curveExtent', namespaces=NAMESPACES)
for location in root:
for x in self.extract_pos_and_poslist(location):
coordinate_list.append(x)
geo_dict = {
'type': 'RouteSegment',
'coordinates': coordinate_list,
# TODO add name for route segments
}
return geo_dict
class Airspace(MultiPointAixm, IAixmFeature):
def __init__(self, root):
super().__init__(root)
def get_geographic_information(self):
"""
Args:
self
Returns:
geo_dict(dict): A dictionary containing relevant information regarding the feature.
"""
subroot = self._root.findall('.//aixm:AirspaceGeometryComponent',
namespaces=NAMESPACES)
coordinate_list = self.get_coordinate_list(subroot)
lower_layer, lower_layer_uom, upper_layer, upper_layer_uom = self.get_airspace_elevation()
lower_layer, lower_layer_uom = util.convert_elevation(lower_layer, lower_layer_uom)
upper_layer, upper_layer_uom = util.convert_elevation(upper_layer, upper_layer_uom)
geo_dict = {
'type': 'Airspace',
'upper_layer': upper_layer,
'upper_layer_uom': upper_layer_uom,
'lower_layer': lower_layer,
'lower_layer_uom': lower_layer_uom,
'name': f"{self.get_first_value('.//aixm:designator')} ({self.get_first_value('.//aixm:name')})",
'coordinates': coordinate_list,
'upper_layer_reference': self.get_first_value('.//aixm:upperLimitReference'),
}
return geo_dict
class VerticalStructure(MultiPointAixm, IAixmFeature):
def __init__(self, root):
super().__init__(root)
def get_geographic_information(self):
"""
Args:
self
Returns:
geo_dict(dict): A dictionary containing relevant information regarding the feature.
"""
subroot = self._root.findall('.//aixm:part',
namespaces=NAMESPACES)[0]
elevation, elevation_uom = self.get_vertical_extent()
elevation, elevation_uom = util.convert_elevation(elevation, elevation_uom)
coordinate_list = self.get_coordinate_list(subroot)
if len(coordinate_list) == 1:
coordinate_list[0] = f'{coordinate_list[0]} {elevation}'
geo_dict = {
'type': 'VerticalStructure',
'obstacle_type': self.get_first_value('.//aixm:type'),
'coordinates': coordinate_list,
'elevation': elevation,
'elevation_uom': elevation_uom,
'name': f'{self.get_first_value(".//aixm:name")} ({self.get_first_value(".//aixm:type")})',
}
        return geo_dict
from __future__ import absolute_import, division, print_function, unicode_literals
from utils import time
import tensorflow as tf
import os
class GRU(object):
'''
    GRU-based text generation model
'''
def __init__(self, vocab_size, embedding_dim, rnn_units, batch_size, buffer_size=10000,
checkpoint_dir='./training_checkpoints'):
        '''
        Create the model
        :param vocab_size: vocabulary size, i.e. the total number of distinct tokens/features
        :param embedding_dim: embedding dimension
        :param rnn_units: number of hidden units in the GRU layer
        :param batch_size: batch size
        :param dataset: training data
        :param buffer_size: shuffle buffer size for the dataset
        '''
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.rnn_units = rnn_units
self.batch_size = batch_size
self.buffer_size = buffer_size
        # defaults
self.checkpoint_dir = checkpoint_dir
self.checkpoint_prefix = os.path.join(self.checkpoint_dir, 'ckpt_{epoch}')
def loss(self, labels, logits):
return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
def __call__(self, dataset):
self.dataset = dataset
self.optimizer = tf.keras.optimizers.Adam()
self.model = self.build_model()
return self.model
def build_model(self, vocab_size='', embedding_dim='', rnn_units='', batch_size=0):
"""构建模型并返回"""
# vocab_size 不设置则拿初始化值
vocab_size = vocab_size or self.vocab_size
embedding_dim = embedding_dim or self.embedding_dim
rnn_units = rnn_units or self.rnn_units
batch_size = batch_size or self.batch_size
model = tf.keras.Sequential([
            # embedding layer
            tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),
            # GRU layer
            tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True,
                                recurrent_initializer='glorot_uniform'),
            # dense layer projecting back to the vocabulary size
            tf.keras.layers.Dense(vocab_size)
])
return model
def train_step(self, inp, target):
with tf.GradientTape() as tape:
predictions = self.model(inp)
loss = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(target, predictions, from_logits=True))
grads = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
return loss
def train(self, epochs: int = 10):
for epoch in range(epochs):
start = time.time()
hidden = self.model.reset_states()
for (batch_n, (inp, target)) in enumerate(self.dataset):
loss = self.train_step(inp, target)
if batch_n % 100 == 0:
template = 'Epoch {} Batch {} Loss {}'
print(template.format(epoch + 1, batch_n, loss))
if (epoch + 1) % 5 == 0:
self.model.save_weights(self.checkpoint_prefix.format(epoch=epoch + 1))
print('Epoch {} Loss {:.4f}'.format(epoch + 1, loss))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
        self.model.save_weights(self.checkpoint_prefix.format(epoch=epoch + 1))  # save after the final epoch
return self.batch_size
def loadModel(self, vocab_size='', embedding_dim='', rnn_units='', batch_size=0):
vocab_size = vocab_size or self.vocab_size
embedding_dim = embedding_dim or self.embedding_dim
rnn_units = rnn_units or self.rnn_units
batch_size = batch_size or self.batch_size
print(batch_size)
self.model = self.build_model(vocab_size, embedding_dim, rnn_units, batch_size)
        # load the weights from the latest checkpoint
self.model.load_weights(tf.train.latest_checkpoint(checkpoint_dir=self.checkpoint_dir))
        return self.model
import hashlib
import http.client
import json
import random
import urllib
from urllib.request import urlopen
__all__ = ['__getBaiduTranslateConfigFromJson','BaiduTranslate','YoudaoTranslate','GoogleTranslate']
def __getBaiduTranslateConfigFromJson(configUrl=''):
'''
    JSON format:
{
"translate":{
"baidu":{
"appid":"",
"secretKey":""
},
"google":{
"appid":"",
"secretKey":""
},"youdao":{
"appid":"",
"secretKey":""
}
}
}
:param configUrl:
:return:
'''
configJSON = configUrl or 'https://zbmain.com/files/others/config.json'
resultJSON = json.loads(urlopen(configJSON).read())
return resultJSON['translate']['baidu']['appid'], resultJSON['translate']['baidu']['secretKey']
class BaiduTranslate():
def __init__(self, appid, secretKey, fromLang='en', toLang='cn', apiUrl=''):
'''
        Register with Baidu's official developer portal to obtain your own appid and secretKey.
        :param appid:
        :param secretKey:
        :param fromLang: source language of the translator, default English ('en')
        :param toLang: target language of the translator, default Chinese ('cn')
        :param apiUrl: API path; empty by default, can be overridden if the official endpoint changes
'''
self.apiUrl = apiUrl or '/api/trans/vip/translate'
self.appid = appid
self.secretKey = secretKey
self.fromLang = fromLang
self.toLang = toLang
def __call__(self, text, fromLang='', toLang=''):
'''
        :param text: text to translate
        :param fromLang: temporary source language [optional]
        :param toLang: temporary target language [optional]
        :return: (success flag, translated text, source text)
'''
fromLang = fromLang or self.fromLang
toLang = toLang or self.toLang
salt = str(random.randint(32768, 65536))
sign = self.appid + text + salt + self.secretKey
sign = hashlib.md5(sign.encode(encoding='utf-8')).hexdigest()
requestUrl = self.apiUrl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(
text) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + salt + '&sign=' + sign
try:
httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
httpClient.request('GET', requestUrl)
            # response is an HTTPResponse object
response = httpClient.getresponse()
result_all = response.read().decode("utf-8")
result = json.loads(result_all)
return True, result["trans_result"][0]["dst"], result["trans_result"][0]["src"]
except Exception as e:
return False, e
finally:
if httpClient:
httpClient.close()
class YoudaoTranslate():
def __init__(self):
print('To be updated')
class GoogleTranslate():
def __init__(self):
print('To be updated')
if __name__ == "__main__":
appid, secretKey = __getBaiduTranslateConfigFromJson()
baiduTranslate = BaiduTranslate(appid, secretKey, 'auto', 'en')
    print(baiduTranslate('你好,世界!'))
from numpy import argmax
class OneHot(object):
'''
    One-hot encoder
    encode: call() || encode()
    decode: decode()
'''
def __init__(self):
        # list of all class features
        self.__class_lst = []
        # feature -> index mapping
        self.__char_to_num = []
        # index -> feature mapping
        self.__num_to_char = []
        # list of one-hot encodings
self.__onehot_encoded = []
def __call__(self, sourceList:list, classList:list=None):
'''
        List to one-hot encoding
        :param sourceList: source list
        :param classList: list of all classes of the source list. Default: None (then the source list itself is used)
        :return: list of one-hot encodings
'''
return self.encode(sourceList, classList)
def encode(self, sourceList:list, classList:list=None):
'''
        List to one-hot encoding (equivalent to the call method)
        :param sourceList: source list
        :param classList: list of all classes of the source list. Default: None (then the source list itself is used)
        :return: list of one-hot encodings
'''
        self.__class_lst = classList or sourceList  # if no class list is given, the source list itself defines the classes
self.__char_to_num = dict((c, n) for n, c in enumerate(self.__class_lst))
self.__num_to_char = dict((n, c) for n, c in enumerate(self.__class_lst))
integer_encoded = [self.__char_to_num[char] for char in sourceList]
        # list of one-hot encoded vectors
self.__onehot_encoded = []
for value in integer_encoded:
letter = [0 for _ in range(len(self.__class_lst))]
letter[value] = 1
self.__onehot_encoded.append(letter)
return self.__onehot_encoded
def decode(self, onehotNode:list):
'''
        One-hot encoded element to source-list element
        :param onehotNode: an element of the one-hot encoding
        :return: the corresponding source-list element
        :example: decode([1,0,0])
'''
return self.__num_to_char[argmax(onehotNode)]
def getNodeOneHot(self, char:str):
'''
        Get the one-hot encoding of a source-list element
        :param char: the source element
        :return: the one-hot encoding of that element
'''
return self.__onehot_encoded[self.__char_to_num[char]]
@property
def onehotCode(self):
        '''Return the one-hot encodings'''
return self.__onehot_encoded
if __name__ == "__main__":
onehot = OneHot()
source = ['a', 'b', 'c', 'd']
onehot_list = onehot(source)
print(onehot_list)
print(onehot.getNodeOneHot(source[1]))
    print(onehot.decode(onehot_list[1]))
from .aifes_code_generator.aifes_code_creator import AifesCodeGenerator
from .pytorch_extractor.pytorch_extractor import PytorchExtractor
from .support.aifes_model import AifesType
import numpy as np
from torch.nn import Module
def convert_to_fnn_f32_express(pytorch_model: Module, output_path: str, with_weights=True):
"""
Converts the given PyTorch model to AIfES Express F32. Creates one header file with the model in the output_path that can be included to any AIfES project. If weights are extracted as well, a second header file is created, which contains the flattened weights of the PyTorch model.
:param pytorch_model: PyTorch model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param with_weights: Extracts the weights and bias from the given model and creates a separate header file with flatten weights.
"""
pytorch_extractor = PytorchExtractor(pytorch_model)
aifestype = AifesType.EXPRESS
aifescode = AifesCodeGenerator(pytorch_extractor, 'default', aifestype, output_path, with_weights)
aifescode.run_f32()
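
# Example usage (a sketch; `net` is assumed to be a trained torch.nn.Module, the output path is arbitrary):
#   convert_to_fnn_f32_express(net, 'out/aifes_express_f32')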
def convert_to_fnn_q7_express(pytorch_model: Module, output_path: str, representative_data: np.ndarray,
target_alignment: int, byteorder: str):
"""
Converts the given PyTorch model to AIfES Express Q7. Creates one header file with the model in the output_path that can be included to any AIfES project. A second header file is created, which contains the flattened weights of the PyTorch model. This function converts the given PyTorch model to Q7. It needs representative data to calculate the quantization parameters.
:param pytorch_model: PyTorch model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param representative_data: Representative data of the input data of the given PyTorch model. The data is needed to calculate the quantization parameters for the hidden layers.
:param target_alignment: Alignment of the created flatbuffer depending on target architecture (1, 2, or 4 Bytes). E.g., for ARM Cortex M4 it is 4, which corresponds to 4 Bytes as it has a 32 Bit storage, for AVR Arduino it is 2, as the memory is organized as 16 Bit (2 Bytes)
:param byteorder: Byte order of target system, i.e., 'little' for little endian or 'big' for big endian.
"""
pytorch_extractor = PytorchExtractor(pytorch_model, use_transposed_layers=False)
aifestype = AifesType.EXPRESS
aifescode = AifesCodeGenerator(pytorch_extractor, 'default', aifestype, output_path, with_weights=True)
aifescode.run_q7(representative_data, target_alignment, byteorder)
def convert_to_fnn_f32(pytorch_model: Module, output_path: str, with_weights=True):
"""
Converts the given PyTorch model to AIfES F32 (non-express version). Creates one header file with the model in the output_path that can be included to any AIfES project. If weights are extracted as well, a second header file is created, which contains the flattened weights of the PyTorch model.
:param pytorch_model: PyTorch model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param with_weights: Extracts the weights and bias from the given model and creates a separate header file with flattened weights.
"""
pytorch_extractor = PytorchExtractor(pytorch_model)
aifestype = AifesType.NORMAL
aifescode = AifesCodeGenerator(pytorch_extractor, 'default', aifestype, output_path, with_weights)
aifescode.run_f32()
def convert_to_fnn_f32_cmsis(pytorch_model: Module, output_path: str, with_weights=True):
"""
Converts the given PyTorch model to AIfES F32 CMSIS implementation (non-express version). Creates one header file with the model in the output_path that can be included to any AIfES project. If weights are extracted as well, a second header file is created, which contains the flattened weights of the PyTorch model.
:param pytorch_model: PyTorch model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param with_weights: Extracts the weights and bias from the given model and creates a separate header file with flattened weights.
"""
pytorch_extractor = PytorchExtractor(pytorch_model)
aifestype = AifesType.NORMAL
aifescode = AifesCodeGenerator(pytorch_extractor, 'cmsis', aifestype, output_path, with_weights)
aifescode.run_f32()
def convert_to_fnn_q7(pytorch_model: Module, output_path: str, representative_data: np.ndarray,
target_alignment: int, byteorder: str, transpose=True):
"""
Converts the given PyTorch model to AIfES Q7 implementation (non-express version). Creates one header file with the model in the output_path that can be included to any AIfES project. A second header file is created, which contains the flattened weights of the PyTorch model. This function converts the given PyTorch model to Q7. It needs representative data to calculate the quantization parameters.
:param pytorch_model: PyTorch model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param representative_data: Representative data of the input data of the given PyTorch model. The data is needed to calculate the quantization parameters for the hidden layers.
:param target_alignment: Alignment of the created flatbuffer depending on target architecture (1, 2, or 4 Bytes). E.g., for ARM Cortex M4 it is 4, which corresponds to 4 Bytes as it has a 32 Bit storage, for AVR Arduino it is 2, as the memory is organized as 16 Bit (2 Bytes)
:param byteorder: Byte order of target system, i.e., 'little' for little endian or 'big' for big endian.
:param transpose: When transpose=True the weights of the layers are transposed, so that the weights for each neuron are next to each other in memory. This can improve the performance of the ANN. Default is therefore 'True'.
"""
pytorch_extractor = PytorchExtractor(pytorch_model, transpose)
aifestype = AifesType.NORMAL
aifescode = AifesCodeGenerator(pytorch_extractor, 'default', aifestype, output_path, with_weights=True)
aifescode.run_q7(representative_data, target_alignment, byteorder)
def convert_to_fnn_q7_cmsis(pytorch_model: Module, output_path: str, representative_data: np.ndarray,
target_alignment: int, byteorder: str):
"""
Converts the given PyTorch model to AIfES Q7 implementation with CMSIS support (non-express version). Creates one header file with the model in the output_path that can be included to any AIfES project. A second header file is created, which contains the flattened weights of the PyTorch model. This function converts the given PyTorch model to Q7. It needs representative data to calculate the quantization parameters.
:param pytorch_model: PyTorch model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param representative_data: Representative data of the input data of the given PyTorch model. Is needed to calculate the quantization parameters for the hidden layers.
:param target_alignment: Alignment of the created flatbuffer depending on target architecture (1, 2, or 4 Bytes). E.g., for ARM Cortex M4 it is 4, which corresponds to 4 Bytes as it has a 32 Bit storage, for AVR Arduino it is 2, as the memory is organized as 16 Bit (2 Bytes)
:param byteorder: Byte order of target system, i.e., 'little' for little endian or 'big' for big endian.
"""
pytorch_extractor = PytorchExtractor(pytorch_model, use_transposed_layers=True)
aifestype = AifesType.NORMAL
aifescode = AifesCodeGenerator(pytorch_extractor, 'cmsis', aifestype, output_path, with_weights=True)
aifescode.run_q7(representative_data, target_alignment, byteorder) | AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/pytorch2aifes.py | pytorch2aifes.py |
from .aifes_code_generator.aifes_code_creator import AifesCodeGenerator
from .keras_extractor.keras_extractor import KerasExtractor
from .support.aifes_model import AifesType
import numpy as np
from tensorflow import keras
def convert_to_fnn_f32_express(keras_model: keras.Model, output_path: str, with_weights=True):
"""
Converts the given Keras model to AIfES Express F32. Creates one header file with the model in the output_path that can be included to any AIfES project. If weights are extracted as well, a second header file is created, which contains the flattened weights of the Keras model.
:param keras_model: Keras model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param with_weights: Extracts the weights and bias from the given model and creates a separate header file with flattened weights.
"""
keras_extractor = KerasExtractor(keras_model)
aifestype = AifesType.EXPRESS
aifescode = AifesCodeGenerator(keras_extractor, 'default', aifestype, output_path, with_weights)
aifescode.run_f32()
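# --- Illustrative usage sketch (not part of the original module) ---------------
# Shows how convert_to_fnn_f32_express() could be called for a small Keras model.
# The model and the output directory "aifes_export" are example assumptions only.
def _example_convert_f32_express():  # documentation-only helper, never called by the library
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense
    model = Sequential([
        Dense(8, activation="sigmoid", input_shape=(2,)),  # first layer must be Dense
        Dense(1, activation="sigmoid"),
    ])
    # Should create aifes_export/aifes_e_f32_fnn.h and aifes_export/aifes_e_f32_weights.h
    convert_to_fnn_f32_express(model, "aifes_export", with_weights=True)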
def convert_to_fnn_q7_express(keras_model: keras.Model, output_path: str, representative_data: np.ndarray,
target_alignment: int, byteorder: str):
"""
Converts the given Keras model to AIfES Express Q7. Creates one header file with the model in the output_path that can be included to any AIfES project. A second header file is created, which contains the flattened weights of the Keras model. This function converts the given Keras model to Q7. It needs representative data to calculate the quantization parameters.
:param keras_model: Keras model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param representative_data: Representative data of the input data of the given Keras model. The data is needed to calculate the quantization parameters for the hidden layers.
:param target_alignment: Alignment of the created flatbuffer depending on target architecture (1, 2, or 4 Bytes). E.g., for ARM Cortex M4 it is 4, which corresponds to 4 Bytes as it has a 32 Bit storage, for AVR Arduino it is 2, as the memory is organized as 16 Bit (2 Bytes)
:param byteorder: Byte order of target system, i.e., 'little' for little endian or 'big' for big endian.
"""
keras_extractor = KerasExtractor(keras_model, use_transposed_layers=False)
aifestype = AifesType.EXPRESS
aifescode = AifesCodeGenerator(keras_extractor, 'default', aifestype, output_path, with_weights=True)
aifescode.run_q7(representative_data, target_alignment, byteorder)
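# --- Illustrative usage sketch (not part of the original module) ---------------
# Shows how convert_to_fnn_q7_express() could be called with representative data.
# The model, the data and the target settings below are example assumptions only.
def _example_convert_q7_express():  # documentation-only helper, never called by the library
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense
    model = Sequential([
        Dense(8, activation="sigmoid", input_shape=(2,)),
        Dense(1, activation="sigmoid"),
    ])
    representative_data = np.random.uniform(-1.0, 1.0, size=(100, 2))
    convert_to_fnn_q7_express(model, "aifes_export", representative_data,
                              target_alignment=4, byteorder="little")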
def convert_to_fnn_f32(keras_model: keras.Model, output_path: str, with_weights=True):
"""
Converts the given Keras model to AIfES F32 (non-express version). Creates one header file with the model in the output_path that can be included to any AIfES project. If weights are extracted as well, a second header file is created, which contains the flattened weights of the Keras model.
:param keras_model: Keras model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param with_weights: Extracts the weights and bias from the given model and creates a separate header file with flattened weights.
"""
keras_extractor = KerasExtractor(keras_model, use_transposed_layers=False)
aifestype = AifesType.NORMAL
aifescode = AifesCodeGenerator(keras_extractor, 'default', aifestype, output_path, with_weights)
aifescode.run_f32()
def convert_to_fnn_f32_cmsis(keras_model: keras.Model, output_path: str, with_weights=True):
"""
Converts the given Keras model to AIfES F32 CMSIS implementation (non-express version). Creates one header file with the model in the output_path that can be included to any AIfES project. If weights are extracted as well, a second header file is created, which contains the flattened weights of the Keras model.
:param keras_model: Keras model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param with_weights: Extracts the weights and bias from the given model and creates a separate header file with flattened weights.
"""
keras_extractor = KerasExtractor(keras_model)
aifestype = AifesType.NORMAL
aifescode = AifesCodeGenerator(keras_extractor, 'cmsis', aifestype, output_path, with_weights)
aifescode.run_f32()
def convert_to_fnn_q7(keras_model: keras.Model, output_path: str, representative_data: np.ndarray,
target_alignment: int, byteorder: str, transpose=True):
"""
Converts the given Keras model to AIfES Q7 implementation (non-express version). Creates one header file with the model in the output_path that can be included to any AIfES project. A second header file is created, which contains the flattened weights of the Keras model. This function converts the given Keras model to Q7. It needs representative data to calculate the quantization parameters.
:param keras_model: Keras model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param representative_data: Representative data of the input data of the given Keras model. The data is needed to calculate the quantization parameters for the hidden layers.
:param target_alignment: Alignment of the created flatbuffer depending on target architecture (1, 2, or 4 Bytes). E.g., for ARM Cortex M4 it is 4, which corresponds to 4 Bytes as it has a 32 Bit storage, for AVR Arduino it is 2, as the memory is organized as 16 Bit (2 Bytes)
:param byteorder: Byte order of target system, i.e., 'little' for little endian or 'big' for big endian.
:param transpose: When transpose=True the weights of the layers are transposed, so that the weights for each neuron are next to each other in memory. This can improve the performance of the ANN. Default is therefore 'True'.
"""
keras_extractor = KerasExtractor(keras_model, transpose)
aifestype = AifesType.NORMAL
aifescode = AifesCodeGenerator(keras_extractor, 'default', aifestype, output_path, with_weights=True)
aifescode.run_q7(representative_data, target_alignment, byteorder)
def convert_to_fnn_q7_cmsis(keras_model: keras.Model, output_path: str, representative_data: np.ndarray,
target_alignment: int, byteorder: str):
"""
Converts the given Keras model to AIfES Q7 implementation with CMSIS support (non-express version). Creates one header file with the model in the output_path that can be included to any AIfES project. A second header file is created, which contains the flattened weights of the Keras model. This function converts the given Keras model to Q7. It needs representative data to calculate the quantization parameters.
:param keras_model: Keras model, which should be converted
:param output_path: File path, where the converted model should be stored. If the folder doesn't exist, it will be created by this function.
:param representative_data: Representative data of the input data of the given Keras model. Is needed to calculate the quantization parameters for the hidden layers.
:param target_alignment: Alignment of the created flatbuffer depending on target architecture (1, 2, or 4 Bytes). E.g., for ARM Cortex M4 it is 4, which corresponds to 4 Bytes as it has a 32 Bit storage, for AVR Arduino it is 2, as the memory is organized as 16 Bit (2 Bytes)
:param byteorder: Byte order of target system, i.e., 'little' for little endian or 'big' for big endian.
"""
keras_extractor = KerasExtractor(keras_model, use_transposed_layers=True)
aifestype = AifesType.NORMAL
aifescode = AifesCodeGenerator(keras_extractor, 'cmsis', aifestype, output_path, with_weights=True)
aifescode.run_q7(representative_data, target_alignment, byteorder) | AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/keras2aifes.py | keras2aifes.py |
from typing import List
import numpy as np
from enum import Enum
# Enum of the different types of layers
class Layer(Enum):
DENSE = 1
DENSE_WT = 11 # Dense layer with transposed weights matrix
ELU = 2
INPUT = 3
LEAKY_RELU = 4
LINEAR = 5
RELU = 6
SIGMOID = 7
SOFTMAX = 8
SOFTSIGN = 9
TANH = 10
# Definition of activation layers
act_layer = [Layer.ELU, Layer.LINEAR, Layer.LEAKY_RELU, Layer.SOFTSIGN, Layer.SOFTMAX, Layer.SIGMOID, Layer.RELU,
Layer.TANH]
# Definition of configurable activation layers
configurable_act_layer = [Layer.ELU, Layer.LEAKY_RELU]
# Enum of different data types
class Dtype(Enum):
FLOAT32 = 1
Q31 = 2
Q7 = 3
# Enum of different AIfES Frontends
class AifesType(Enum):
EXPRESS = 1
NORMAL = 2
# Dictionary to convert from Dtype to AIfES specific names
dtype_to_aifes = {Dtype.FLOAT32: 'f32', Dtype.Q31: 'q31', Dtype.Q7: 'q7'}
# Super class for AifesLayer containing common variables
class AifesLayer:
def __init__(self, layer_type: Layer, layer_name: str, input_shape: np.ndarray, output_shape: np.ndarray):
self.layer_type = layer_type
# Layer name
self.layer_name = layer_name
# Input Shape
self.input_shape = input_shape
# Output Shape
self.output_shape = output_shape
self.init_macro = None
# Add print options for easier debugging
def __str__(self):
output_str = "Layer Type: "
output_str += str(self.layer_type) + ", " + self.layer_name
output_str += "; Input Shape: " + str(self.input_shape)
output_str += "; Output Shape: " + str(self.output_shape)
return output_str
# Type dependent class for each layer type with corresponding init_macro
class AifesLayer_Dense(AifesLayer):
def __init__(self, input_shape: np.ndarray, output_shape: np.ndarray, layer_name='dense'):
super().__init__(Layer.DENSE, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = "AILAYER_DENSE_{DTYPE_C}_A(" + str(input_shape) + ");"
class AifesLayer_DenseTranspose(AifesLayer):
def __init__(self, input_shape: np.ndarray, output_shape: np.ndarray, layer_name='dense'):
super().__init__(Layer.DENSE_WT, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = "AILAYER_DENSE_{DTYPE_C}_A(" + str(input_shape) + ");"
class AifesLayer_Elu(AifesLayer):
def __init__(self, alpha_value: float, input_shape=None, output_shape=None, layer_name='elu' ):
super().__init__(Layer.ELU, layer_name, input_shape, output_shape)
# Alpha Value
self.alpha_value = alpha_value
# Init macro
self.init_macro = "AILAYER_ELU_{DTYPE_C}_A({Q_START_INIT}" + str(alpha_value) + "{Q_STOP_INIT});"
class AifesLayer_Input(AifesLayer):
def __init__(self, input_shape=None, output_shape=None, layer_name='input'):
super().__init__(Layer.INPUT, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = "AILAYER_INPUT_{DTYPE_C}_A(" + "2" + ", input_layer_shape);"
class AifesLayer_Linear(AifesLayer):
def __init__(self, input_shape=None, output_shape=None, layer_name='linear'):
super().__init__(Layer.LINEAR, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = None
class AifesLayer_Leaky_ReLU(AifesLayer):
def __init__(self, alpha_value: float, input_shape=None, output_shape=None, layer_name='leaky_relu'):
super().__init__(Layer.LEAKY_RELU, layer_name, input_shape, output_shape)
# Alpha Value
self.alpha_value = alpha_value
# Init macro
self.init_macro = "AILAYER_LEAKY_RELU_{DTYPE_C}_A({Q_START_INIT}" + str(alpha_value) + "{Q_STOP_INIT});"
class AifesLayer_ReLU(AifesLayer):
def __init__(self, input_shape=None, output_shape=None, layer_name='relu'):
super().__init__(Layer.RELU, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = "AILAYER_RELU_{DTYPE_C}_A();"
class AifesLayer_Sigmoid(AifesLayer):
def __init__(self, input_shape=None, output_shape=None, layer_name='sigmoid'):
super().__init__(Layer.SIGMOID, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = "AILAYER_SIGMOID_{DTYPE_C}_A();"
class AifesLayer_Softmax(AifesLayer):
def __init__(self, input_shape=None, output_shape=None, layer_name='softmax'):
super().__init__(Layer.SOFTMAX, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = "AILAYER_SOFTMAX_{DTYPE_C}_A();"
class AifesLayer_Softsign(AifesLayer):
def __init__(self, input_shape=None, output_shape=None, layer_name='softsign'):
super().__init__(Layer.SOFTSIGN, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = "AILAYER_SOFTSIGN_{DTYPE_C}_A();"
class AifesLayer_Tanh(AifesLayer):
def __init__(self, input_shape=None, output_shape=None, layer_name='tanh'):
super().__init__(Layer.TANH, layer_name, input_shape, output_shape)
# Init macro
self.init_macro = "AILAYER_TANH_{DTYPE_C}_A();"
# Class for AIfES Model, contains the extracted structure of the Keras Model
class AifesModel:
def __init__(self, aifes_fnn_structure: List[AifesLayer], aifes_layer_count: int, flatten_aifes_weights: list):
self.aifes_fnn_structure = aifes_fnn_structure
self.aifes_layer_count = aifes_layer_count
self.flatten_aifes_weights = flatten_aifes_weights
# Define str function for easier debug
def __str__(self):
output_str = "####AIfES Model####\n"
for el in self.aifes_fnn_structure:
output_str += str(el) + "\n"
output_str += "Layer count: " + str(self.aifes_layer_count) + "\n"
output_str += "Layer Weights: "
output_str += str(self.flatten_aifes_weights)
return output_str | AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/support/aifes_model.py | aifes_model.py |
import warnings
from ..support.aifes_model import *
try:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import ELU, LeakyReLU, ReLU, Softmax
from tensorflow.keras.activations import sigmoid, softsign, tanh
except ImportError as err:
raise ImportError("Tensorflow is not installed. Please make sure that you install Tensorflow in the right version "
"(>= 2.4) to convert your model from Keras to AIfES.")
from packaging import version
try:
assert version.parse(tf.version.VERSION) >= version.parse('2.4.0')
except AssertionError as err:
raise ImportError("Tensorflow is not installed in the required version. Please install version 2.4 and above.")
class KerasExtractor:
"""Keras Extractor Class. Provides interface functions for the AifesCodeGenerator to extract values from a
Keras model"""
# Activation Functions available in Dense Layer
ACT_FUNCTIONS = ['elu', 'leakyrelu', 'leaky_relu', 'relu', 'softsign', 'softmax', 'sigmoid', 'tanh']
# Separate Activation Functions as Keras.Layers
ACT_LAYER = [ELU, LeakyReLU, ReLU, softsign, Softmax, sigmoid, tanh]
def __init__(self, model: keras.Model, use_transposed_layers=False):
"""
Initialize the KerasExtractor
:param model: Keras Model which should be converted
:param use_transposed_layers: If transposed layers should be used for the dense layers
"""
self._model = model
self._aifes_model = None
self._use_transposed_layers = use_transposed_layers
self._has_bias = True
def extractor_structure(self) -> AifesModel:
"""
Extracts the Keras model and saves it as an AIfES Model representation
:return: Returns a representation of the Keras model as AIfES Model
"""
# Local variables
# Contains the AIfES structure after extraction
aifes_fnn_structure = []
# Get layer count
layer_count = len(self._model.layers)
aifes_layer_count = layer_count + 1
# Go through each layer and extract values from it
for x in range(0, layer_count, 1):
curr_layer = self._model.layers[x]
# Check if current layer is a dense layer
if self._is_dense_layer(curr_layer):
# Check if first layer, then we need to add an input layer
if x == 0:
aifes_fnn_structure.append(AifesLayer_Input(self._model.layers[x].input_shape[1],
self._model.layers[x].input_shape[1]))
# Add corresponding dense layer depending on if transposed layers should be used
if not self._use_transposed_layers:
aifes_fnn_structure.append(AifesLayer_Dense(self._model.layers[x].units,
self._model.layers[x].units))
else:
aifes_fnn_structure.append(AifesLayer_DenseTranspose(self._model.layers[x].units,
self._model.layers[x].units))
# Check if dense layer contains activation, if not, no activation is added
if self._is_dense_layer_with_activation(curr_layer):
aifes_fnn_structure.append(self._get_activation_function(curr_layer))
else:
if self._is_unsupported_activation_function(curr_layer):
raise ValueError(f"Unsupported activation function in layer {x}. See "
f"https://fraunhofer-ims.github.io/AIfES_for_Arduino/#OverviewFeatures "
f"for available activation functions.")
# Check if current layer is an activation layer and is after the first layer
elif self._is_activation_layer(curr_layer) and x > 0:
# Add activation layer to AIfES model
aifes_fnn_structure.append(self._get_activation_layer(curr_layer))
# Layer is neither a dense nor activation layer, raise error
else:
if x == 0:
raise ValueError(f"First layer needs to be a dense layer. Got '{curr_layer}' instead.")
else:
raise ValueError(f"Unsupported layer chosen. Got '{curr_layer}', but must be one of "
"Dense, ELU, LeakyReLU, linear, relu, sigmoid, softmax, softsign or "
"tanh")
# Create AIfES Model and return it
self._aifes_model = AifesModel(aifes_fnn_structure, aifes_layer_count, None)
return self._aifes_model
def extractor_values(self):
"""
Extracts the values of a Keras model and returns them
:return: Extracted weights
"""
if not self._has_bias:
raise ValueError("Your model needs dense layer with bias for a conversion to AIfES with weights. Please "
"ensure that your layers have bias.")
weights = self._model.get_weights()
return weights
def get_transpose_status(self) -> bool:
"""
Returns status, if transposed layers should be used
:return: Bool, True if transposed layers are used, otherwise False
"""
return self._use_transposed_layers
def _is_dense_layer(self, curr_layer) -> bool:
"""
Checks if current layer is a correctly formatted dense layer
:param curr_layer: Layer of the model, which should be checked
:return: True, if current layer is dense layer, otherwise False
"""
if curr_layer.__class__.__name__ == 'Dense':
if self._is_correctly_configured_dense_layer(curr_layer):
return True
else:
return False
else:
return False
def _is_dense_layer_with_activation(self, curr_layer) -> bool:
"""
Checks if the activation function of the dense layer is part of self.ACT_FUNCTIONS, i.e. whether the layer has an explicit activation function. The linear activation function is the default and is therefore not considered an activation function.
:param curr_layer: Current layer, which should be checked
:return: True, if activation function is set and supported, otherwise False
"""
# Get activation function
layer_config = curr_layer.get_config()
acti = layer_config["activation"]
# When configurable activation function, acti is of type dict. We need only the name, so we extract it here
if type(acti) is dict:
acti = acti['class_name'].lower()
# Check if acti is part of ACT_FUNCTIONS
if acti in self.ACT_FUNCTIONS:
return True
else:
return False
def _get_activation_function(self, curr_layer) -> AifesLayer:
"""
Returns the activation layer for AIfES of the curr_layer. Extracts the value from a dense layer with set activation function.
:param curr_layer: Current layer, from which the activation function should be extracted
:return: AifesLayer with the initialized AIfES activation layer
"""
# Get activation function
layer_config = curr_layer.get_config()
acti = layer_config["activation"]
# When configurable activation function, acti is of type dict. We need only the name, so we extract it here
if type(acti) is dict:
acti = acti['class_name'].lower()
# Return corresponding activation layer
if acti == 'elu':
if type(layer_config["activation"]) is dict:
return AifesLayer_Elu(layer_config["activation"]["config"]["alpha"])
else:
warnings.warn("Elu layer was not customized. The default alpha value of 1.0 is used. ")
return AifesLayer_Elu(alpha_value=1.0)
elif acti == 'leakyrelu':
if type(layer_config["activation"]) is dict:
return AifesLayer_Leaky_ReLU(layer_config["activation"]["config"]["alpha"])
else:
warnings.warn("LeakyRelu was not customized. The default alpha value of 0.3 is used. ")
return AifesLayer_Leaky_ReLU(alpha_value=0.3)
elif acti == 'leaky_relu':
warnings.warn("LeakyRelu was not customized. The default alpha value of 0.3 is used. ")
return AifesLayer_Leaky_ReLU(alpha_value=0.3)
elif acti == 'linear':
return AifesLayer_Linear()
elif acti == 'relu':
return AifesLayer_ReLU()
elif acti == 'sigmoid':
return AifesLayer_Sigmoid()
elif acti == 'softmax':
return AifesLayer_Softmax()
elif acti == 'softsign':
return AifesLayer_Softsign()
elif acti == 'tanh':
return AifesLayer_Tanh()
else:
raise ValueError(
"Unsupported activation in layer. Got " + str(acti) + ", but must be part of"
"ELU, LeakyReLU, linear, relu, sigmoid, softmax, softsign or tanh")
def _is_activation_layer(self, curr_layer) -> bool:
"""
Check if current layer is an activation layer, i.e. whether its type is included in self.ACT_LAYER.
:param curr_layer: Current layer
:return: True, if current layer is activation layer, otherwise False
"""
if type(curr_layer) in self.ACT_LAYER:
return True
else:
return False
def _get_activation_layer(self, curr_layer) -> AifesLayer:
"""
Returns the activation layer for AIfES of the curr_layer. Checks the type of the curr_layer. (Independent activation function, not set with Dense layer)
:param curr_layer: Current layer
:return: AifesLayer with the initialized AIfES activation layer
"""
layer_type = type(curr_layer)
if layer_type == ELU:
return AifesLayer_Elu(curr_layer.alpha)
elif layer_type == LeakyReLU:
return AifesLayer_Leaky_ReLU(curr_layer.alpha)
elif layer_type == ReLU:
return AifesLayer_ReLU()
elif layer_type == sigmoid:
return AifesLayer_Sigmoid()
elif layer_type == Softmax:
return AifesLayer_Softmax()
elif layer_type == softsign:
return AifesLayer_Softsign()
elif layer_type == tanh:
return AifesLayer_Tanh()
else:
raise ValueError("Unsupported activation layer " + str(layer_type) + ". Activation Layer needs to be of"
" type ELU, LeakyReLU, ReLU, Sigmoid, Softmax, Softsign or Tanh")
def _is_unsupported_activation_function(self, curr_layer):
# Get activation function
layer_config = curr_layer.get_config()
acti = layer_config["activation"]
# When configurable activation function, acti is of type dict. We need only the name, so we extract it here
if type(acti) is dict:
acti = acti['class_name'].lower()
if acti == 'linear':
return False
else:
return True
def _is_correctly_configured_dense_layer(self, curr_layer):
if str(curr_layer.dtype) != 'float32':
raise ValueError(f"A dense layer has not the data type 'float32', but instead {curr_layer.dtype}. Please "
f"change it to 'float32'")
if str(curr_layer.use_bias) != 'True':
self._has_bias = False
return True | AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/keras_extractor/keras_extractor.py | keras_extractor.py |
from ..support.aifes_model import AifesModel, configurable_act_layer
from .support_model_conversion_q7 import *
class ModelConversion:
def __init__(self, aifes_model: AifesModel, representative_dataset: np.ndarray,
alignment: int, byteorder: str):
"""
Initializes the class with common variables.
:param aifes_model: AIfES model which should be converted
:param representative_dataset: representative dataset for conversion of the ANN
:param alignment: Alignment of target architecture in bytes, i.e. 2 for AVR Arduino (16 bit MCU),
4 for ARM Cortex (32 Bit MCU)
:param byteorder: Byteorder of target architecture, i.e. 'little' for little endian and 'big' for big endian
"""
self._aifes_model = aifes_model
self._representative_dataset = representative_dataset
self._alignment = alignment
self._byteorder = byteorder
def convert_to_q7(self, weights):
"""
Converts the given weights to a flatbuffer in q7 style. Uses AIfES style of Q7 implementation.
:param self:
:param weights: Weights of the model, which should be converted. Can be extracted from the source model by using the extractor's extractor_values method
:returns: Returns resulting Q parameters for layers and weights/bias
"""
# Representation of the model for AIfES pytools
layers = []
act_params = []  # Additional parameters of configurable activation layers (e.g. alpha values)
for el in self._aifes_model.aifes_fnn_structure:
layers.append(el.layer_type)
if el.layer_type in configurable_act_layer:
act_params.append(el.alpha_value)
result_q_params, weights_q_params, weights_q7 = quantize_model_q7(layers, weights, self._representative_dataset,
act_params=act_params)
flatbuffer_q7 = create_flatbuffer_q7(result_q_params, weights_q_params, weights_q7,
target_alignment=self._alignment, byteorder=self._byteorder)
self._aifes_model.flatten_aifes_weights = flatbuffer_q7
return result_q_params, weights_q_params | AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/model_converter/model_conversion.py | model_conversion.py |
import numpy as np
from ..support.aifes_model import Layer
def calc_q_params_q7(min_value, max_value):
"""Calculate quantization parameters for values of the given range
"""
if max_value - min_value < 0:
raise Exception('min_value has to be smaller than max_value.')
elif max_value == 0 and min_value == 0:
shift = 0
zero_point = 0
return (shift, zero_point)
# An interval that does not include the zero has to be handled special
if min_value > 0 and max_value > 0:
min_value = 0
elif min_value < 0 and max_value < 0:
max_value = 0
min_target = -128
target_interval_bitlen = 8
interval_old = max_value - min_value
value_interval_bitlen = -24
while (2 ** value_interval_bitlen) <= interval_old:
value_interval_bitlen += 1
interval_new = 2 ** value_interval_bitlen
min_new = min_value - (interval_new - interval_old) / 2.0
if target_interval_bitlen - value_interval_bitlen < 0:
raise Exception('One or more values are too big to quantize them to a 8 bit integer.')
shift = int(target_interval_bitlen - value_interval_bitlen)
zero_point = int(np.round(-min_new * (1 << shift)) + min_target)
return (shift, zero_point)
def quantize_tensor_q7(tensor, q_params):
"""Quantize the tensor to Q7 representation with the given quantization parameters (shift, zero_point)
"""
tensor_q7 = np.round(tensor * (1 << q_params[0]) + q_params[1]).astype(np.int32)
return tensor_q7
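# --- Short worked example (illustrative only, not used by the library) ---------
# For values in the range [-2, 2] the parameters come out as (shift=5, zero_point=0),
# so a float value v is mapped to round(v * 2**5): 1.0 -> 32, -2.0 -> -64.
def _example_quantize_values():  # documentation-only helper
    q_params = calc_q_params_q7(-2.0, 2.0)                        # -> (5, 0)
    q7_values = quantize_tensor_q7(np.array([-2.0, 0.0, 1.0]), q_params)
    return q_params, q7_values                                    # -> (5, 0), [-64, 0, 32]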
def quantize_model_q7(layers, weights, representative_dataset, act_params=[]):
"""Quantize the model to the Q7 representation
Arguments:
layers -- List of the layers (type Layer Enum)
weights -- F32 weights
representative_dataset -- A dataset that is representative for the whole training dataset (To calculate min and max values)
act_params -- A list containing parameters of the activation functions (e.g. the Leaky ReLU)
"""
intermediate_q_params = [calc_q_params_q7(1.1*np.min(representative_dataset), 1.1*np.max(representative_dataset))]
result_q_params = [intermediate_q_params[-1]] # The quantization params that are in the parameter memory (rest is in inference / training memory, because it is not configurable by the user)
weights_q7 = []
weights_q_params = []
dense_idx = 0
act_param_idx = 0
x = representative_dataset
for layer in layers:
if layer == Layer.DENSE or layer == Layer.DENSE_WT:
y = x @ weights[2*dense_idx] + weights[2*dense_idx+1]
w_q_params = calc_q_params_q7(-np.max(np.abs(weights[2*dense_idx])), np.max(np.abs(weights[2*dense_idx])))
b_q_params = (w_q_params[0] + intermediate_q_params[-1][0], 0)
intermediate_q_params.append(calc_q_params_q7(1.1*np.min(y), 1.1*np.max(y)))
result_q_params.append(intermediate_q_params[-1])
weights_q7.append(quantize_tensor_q7((weights[2*dense_idx].T if layer == Layer.DENSE_WT else weights[2*dense_idx]), w_q_params).astype(np.int8))
weights_q_params.append(w_q_params)
weights_q7.append(quantize_tensor_q7(weights[2*dense_idx+1], b_q_params).astype(np.int32))
weights_q_params.append(b_q_params)
dense_idx += 1
elif layer == Layer.SIGMOID:
y = 1.0 / (1.0 + np.exp(-x))
intermediate_q_params.append((8, -2**7))
elif layer == Layer.SOFTMAX:
y = np.exp(x-np.max(x)) / np.sum(np.exp(x-np.max(x)))
intermediate_q_params.append((8, -2**7))
elif layer == Layer.TANH:
y = np.tanh(x)
intermediate_q_params.append((7, 0))
elif layer == Layer.SOFTSIGN:
y = x / (1 + np.abs(x))
intermediate_q_params.append((7, 0))
elif layer == Layer.LEAKY_RELU:
alpha = act_params[act_param_idx]
y = np.where(x > 0, x, x * alpha)
intermediate_q_params.append(intermediate_q_params[-1])
act_param_idx += 1
elif layer == Layer.RELU:
y = np.where(x > 0, x, 0.0)
intermediate_q_params.append(intermediate_q_params[-1])
elif layer == Layer.ELU:
alpha = act_params[act_param_idx]
y = np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
intermediate_q_params.append(intermediate_q_params[-1])
act_param_idx += 1
elif layer == Layer.INPUT:
y = x
x = y
return result_q_params, weights_q_params, weights_q7
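# --- Illustrative sketch (example data only, not used by the library) ----------
# Quantizes a tiny 2-4-1 network. The weight matrices use the (inputs x outputs)
# layout produced by the extractors; biases are 1-D arrays.
def _example_quantize_model():  # documentation-only helper
    layers = [Layer.INPUT, Layer.DENSE, Layer.SIGMOID, Layer.DENSE, Layer.SIGMOID]
    weights = [
        np.random.uniform(-1, 1, size=(2, 4)), np.random.uniform(-1, 1, size=(4,)),  # dense 1
        np.random.uniform(-1, 1, size=(4, 1)), np.random.uniform(-1, 1, size=(1,)),  # dense 2
    ]
    representative_dataset = np.random.uniform(-1, 1, size=(50, 2))
    return quantize_model_q7(layers, weights, representative_dataset)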
def pad_buffer(buffer, target_alignment):
"""Add zeros to the buffer according to the target alignment
"""
while len(buffer) % target_alignment != 0:
buffer += b'\x00'
return buffer
def q_params_q7_to_bytes(q_params, target_alignment, byteorder='little'):
"""Converts the given Q7 quantization parameters to a byte array
Arguments:
q_params -- (shift, zero_point)
target_alignment -- The alignment of the structs on the target architecture. Must be the same as configured in the AIFES_MEMORY_ALIGNMENT macro in aifes.
byteorder -- "little" for little endian; "big" for big endian. Has to match to the byte order of the target architecture.
"""
buffer = b''
buffer += q_params[0].to_bytes(2, byteorder) # shift uint16
buffer += q_params[1].to_bytes(1, byteorder, signed=True) # zero_point int8
buffer = pad_buffer(buffer, target_alignment) # padding
return buffer
def q_params_q31_to_bytes(q_params, target_alignment, byteorder='little'):
"""Converts the given Q31 quantization parameters to a byte array
Arguments:
q_params -- (shift, zero_point)
target_alignment -- The alignment of the structs on the target achitecture. Must be the same as configured in the AIFES_MEMORY_ALIGNMENT macro in aifes.
byteorder -- "little" for little endian; "big" for big endian. Has to match to the byte order of the target architecture.
"""
buffer = b''
buffer += q_params[0].to_bytes(2, byteorder) # shift uint16
buffer += q_params[1].to_bytes(4, byteorder, signed=True) # zero_point int32
buffer = pad_buffer(buffer, target_alignment) # padding
return buffer
def create_flatbuffer_q7(result_q_params, weights_q_params, weights_q7, target_alignment, byteorder='little'):
"""Creats a byte array, containing all given model parameters like quantization parameters and quantized weights.
Arguments:
result_q_params -- Quantization parameter tuples for the layer results
weights_q_params -- Quantization parameter tuples for the weights and biases
weights_q7 -- Weights and biases as a list of numpy arrays
target_alignment -- The alignment of the arrays and structs on the target architecture. Must be the same as configured in the AIFES_MEMORY_ALIGNMENT macro in aifes.
byteorder -- "little" for little endian; "big" for big endian. Has to match to the byte order of the target architecture.
"""
flatbuffer = b''
for res_params in result_q_params:
flatbuffer += q_params_q7_to_bytes(res_params, target_alignment, byteorder)
for w_q_params, w in zip(weights_q_params, weights_q7):
if w.dtype == np.int8:
flatbuffer += q_params_q7_to_bytes(w_q_params, target_alignment, byteorder)
elif w.dtype == np.int32:
flatbuffer += q_params_q31_to_bytes(w_q_params, target_alignment, byteorder)
if byteorder == 'big':
# Switch to big endian style
flatbuffer += w.byteswap().tobytes()
else:
flatbuffer += w.tobytes()
flatbuffer = pad_buffer(flatbuffer, target_alignment) # padding to match alignment
return flatbuffer
def create_flatbuffer_f32(weights_f32):
"""Creats a byte array for F32 models, containing all given weights.
Arguments:
weights_f32 -- Weights and biases as a list of numpy arrays
"""
flatbuffer = b''
for w in weights_f32:
flatbuffer += w.tobytes()
return flatbuffer
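# --- Illustrative sketch (example data only, not used by the library) ----------
# Flattens two small F32 tensors into one byte buffer; the buffer could then be
# rendered as a C array with str_flatbuffer_c_style() defined below.
def _example_create_flatbuffer_f32():  # documentation-only helper
    weights_f32 = [
        np.array([[0.5, -0.25], [1.0, 0.75]], dtype=np.float32),  # 2x2 weight matrix
        np.array([0.1, -0.1], dtype=np.float32),                  # bias vector
    ]
    return create_flatbuffer_f32(weights_f32)                     # 6 float32 values -> 24 bytes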
def str_flatbuffer_c_style(flatbuffer, target_alignment=4, mutable=True, elements_per_line=-1, byteorder='little') -> str:
"""Print the given flatbuffer to the console for easy copy into your code.
Arguments:
flatbuffer -- A byte array containing the model parameters
target_alignment -- The alignment of the structs on the target architecture. Must be the same as configured in the AIFES_MEMORY_ALIGNMENT macro in aifes.
mutable -- False if the parameters will not be changed afterwards. (For example if you want to do only inferences.)
elements_per_line -- Number of array elements that are printed per line
byteorder -- "little" for little endian; "big" for big endian. Has to match to the byte order of the target architecture.
"""
if elements_per_line == -1:
elements_per_line = int(16 / target_alignment) + 4
pad_buffer(flatbuffer, target_alignment)
# Turn byte order for little or big endian
flatbuffer_turned = []
if byteorder == 'little':
for i in range(int(len(flatbuffer)/target_alignment)):
buffer = []
for j in range(target_alignment):
buffer.append(int(flatbuffer[(i+1)*target_alignment - j - 1]))
flatbuffer_turned.extend(buffer)
else:
for byte in flatbuffer:
flatbuffer_turned.append(int(byte))
out_string = "const uint32_t parameter_memory_size = {};\n".format(len(flatbuffer_turned))
if not mutable:
out_string += "const "
count = 0
if target_alignment == 1:
out_string += "uint8_t"
elif target_alignment == 2:
out_string += "uint16_t"
elif target_alignment == 4:
out_string += "uint32_t"
else:
raise Exception('Only a target_alignment of 1, 2 or 4 is supported.')
out_string += " model_parameters[" + str(int(len(flatbuffer_turned) / target_alignment)) + "] = {\n "
out_string += "0x"
for byte in flatbuffer_turned:
if count != 0 and count % target_alignment == 0:
out_string += ", "
if int(count / target_alignment) % elements_per_line == 0:
out_string += "\n"
out_string += " 0x"
else:
out_string += "0x"
out_string += "{:02X}".format(byte)
count += 1
out_string += "\n};\n"
return out_string | AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/model_converter/support_model_conversion_q7.py | support_model_conversion_q7.py |
from .pytorch_extractor_utils import get_layer_list
from ..support.aifes_model import *
try:
import torch
from torch.nn import Module
from torch.nn import Linear
from torch.nn import ELU, LeakyReLU, ReLU, Sigmoid, Softmax, Softsign, Tanh
except ImportError as err:
raise ImportError("PyTorch is not installed. Please make sure that you install PyTorch in the right version "
"(>= 1.8) to convert your model from PyTorch to AIfES")
from packaging import version
try:
assert version.parse(torch.version.__version__) >= version.parse('1.8.0')
except AssertionError as err:
raise ImportError("PyTorch is no installed in the required version. Please install version 1.8 and above.")
class PytorchExtractor:
"""PyTorch Extractor Class. Provides interface functions for the AifesCodeGenerator to extract values from a PyTorch model"""
# Activation Layers available in AIfES as Type from torch.nn
ACT_FUNCTIONS = [ELU, LeakyReLU, ReLU, Softsign, Softmax, Sigmoid, Tanh]
def __init__(self, model: Module, use_transposed_layers=False):
"""
Initialize the PyTorchExtractor
:param model: PyTorch Model which should be converted
:param use_transposed_layers: If transposed layers should be used for the dense layers
"""
self._model = model
self._aifes_model = None
self._use_transposed_layers = use_transposed_layers
self._has_bias = True
def extractor_structure(self) -> AifesModel:
"""
Extracts the PyTorch model and saves it as an AIfES Model representation
:return: Returns a representation of the PyTorch model as AIfES Model
"""
# Local variables
# Contains the PyTorch layer model as a list
fnn_structure = get_layer_list(self._model)
# Contains the AIfES structure after extraction
aifes_fnn_structure = []
# Get layer count
layer_count = len(fnn_structure)
aifes_layer_count = self._get_layer_cnt(fnn_structure) + 1
# Go through each layer and extract values from it
for x in range(0, layer_count, 1):
curr_layer = fnn_structure[x]
# If first layer, we need to add an input layer
if x == 0:
if self._is_dense_layer(curr_layer):
aifes_fnn_structure.append(AifesLayer_Input(curr_layer.in_features, curr_layer.in_features))
else:
raise ValueError("First layer of the model needs to be a 'linear' layer. Got " + str(type(curr_layer))
+ " instead.")
# Check if dense layer
if self._is_dense_layer(curr_layer):
# Add corresponding dense layer depending on if transposed layers should be used
if not self._use_transposed_layers:
aifes_fnn_structure.append(AifesLayer_Dense(curr_layer.out_features, curr_layer.out_features))
else:
aifes_fnn_structure.append(AifesLayer_DenseTranspose(curr_layer.out_features, curr_layer.out_features))
# Check if activation layer
elif self._is_activation_layer(curr_layer):
aifes_fnn_structure.append(self._get_activation_layer(curr_layer))
# Layer is neither a dense nor a supported activation layer, raise error
else:
raise ValueError("Unsupported layer in layer " + str(x) + ". Got " + str(curr_layer) + ", but must be part of"
" ELU, LeakyReLU, Linear, ReLU, Sigmoid, Softmax, Softsign or Tanh")
# Export AIfES model and return it
self._aifes_model = AifesModel(aifes_fnn_structure, aifes_layer_count, None)
return self._aifes_model
def extractor_values(self):
"""
Extracts the values of the PyTorch model and returns them
:return: Extracted weights
"""
if not self._has_bias:
raise ValueError("Your model needs linear layer with bias for a conversion to AIfES with weights. Please "
"ensure that your layers have bias.")
weights = [param.detach().numpy().T for param in self._model.parameters()]
return weights
def get_transpose_status(self) -> bool:
"""
Returns status, if transposed layers should be used
:return: Bool, True if transposed layers are used, otherwise False
"""
return self._use_transposed_layers
def _is_dense_layer(self, curr_layer) -> bool:
"""
Checks if current layer is a dense layer
:param curr_layer: Layer of the model, which should be checked
:return: True, if current layer is dense layer, otherwise False
"""
if type(curr_layer) is Linear:
if self._is_correctly_configured_dense_layer(curr_layer):
return True
else:
return False
else:
return False
def _get_layer_cnt(self, model) -> int:
"""
Count the number of FNN layers (Linear) in the PyTorch model
:return: Number of layer
"""
layer_cnt = 0
for layer in model:
if type(layer) is Linear:
layer_cnt += 1
return layer_cnt
def _is_activation_layer(self, curr_layer) -> bool:
"""
Check if current layer is an activation layer
:param curr_layer: Current layer from model
:return: True/False depending on layer type
"""
if type(curr_layer) in self.ACT_FUNCTIONS:
return True
else:
return False
def _get_activation_layer(self, curr_layer) -> AifesLayer:
"""
Returns the activation layer for AIfES of the curr_layer. Checks the type of the curr_layer. (Independent activation function, not set with Dense layer)
:param curr_layer: Current layer
:return: AifesLayer with the initialized AIfES activation layer
"""
layer_type = type(curr_layer)
if layer_type == ELU:
return AifesLayer_Elu(curr_layer.alpha)
elif layer_type == LeakyReLU:
return AifesLayer_Leaky_ReLU(curr_layer.negative_slope)
elif layer_type == ReLU:
return AifesLayer_ReLU()
elif layer_type == Sigmoid:
return AifesLayer_Sigmoid()
elif layer_type == Softmax:
return AifesLayer_Softmax()
elif layer_type == Softsign:
return AifesLayer_Softsign()
elif layer_type == Tanh:
return AifesLayer_Tanh()
else:
raise ValueError("Unsupported activation layer " + str(layer_type) + ". Activation Layer needs to be of type "
"ELU, LeakyReLU, ReLU, Sigmoid, Softmax, Softsign or Tanh")
def _is_correctly_configured_dense_layer(self, curr_layer):
if curr_layer.bias is None:
self._has_bias = False
return True | AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/pytorch_extractor/pytorch_extractor.py | pytorch_extractor.py |
from typing import List
import math
from ..support.aifes_model import AifesLayer, Layer, Dtype, dtype_to_aifes, act_layer, configurable_act_layer
from ..model_converter.support_model_conversion_q7 import calc_q_params_q7
def aifes_create_model_structure(layer_structure: List[AifesLayer], dtype: Dtype, implementation: str):
"""
Creates the model structure for non-express AIfES versions. Creates the init and definition text blocks for
the placeholder 'PLACEHOLDER_INIT' and 'PLACEHOLDER_DEF'
:param layer_structure: AIfES Layer structure of the current model; is part of AifesModel.aifes_fnn_structure class
and filled by the extractor
:param dtype: Current Dtype, e.g. Dtype.FLOAT32, Dtype.Q7. Is used for init and definition in template files
:param implementation: Chosen implementation for dense layers, i.e. 'default' or 'cmsis'
:returns: Returns the text blocks for 'PLACEHOLDER_DEF' (aifes_fnn_layer_def) and 'PLACEHOLDER_INIT'
(aifes_fnn_layer_init)
"""
# Define local variables
# Set dytpe to corret string
dtype_str = dtype_to_aifes[dtype]
# Define input layer
aifes_fnn_layer_def = ("ailayer_input_{dtype}_t" + "\t\t" +
"input_layer\t= " + layer_structure[0].init_macro + "\n").format(dtype=dtype_str,
DTYPE_C=dtype_str.upper())
# Define first dense layer after input layer
aifes_fnn_layer_def += ("ailayer_dense_{dtype}_t" + "\t\t" + "dense_layer_1\t= " +
layer_structure[1].init_macro + "\n").format(dtype=dtype_str, DTYPE_C=dtype_str.upper())
# Init input layer
aifes_fnn_layer_init = f"model.input_layer = ailayer_input_{dtype_str}_default(&input_layer);\n"
# Init first dense layer after input layer
# Extract layer name, i.e. Dense
layer_init = layer_structure[1].layer_name
# If layer ist transposed, corresponding string needs to be added
if layer_structure[1].layer_type == Layer.DENSE_WT:
layer_init += "_wt"
# Init first dense layer
aifes_fnn_layer_init += (f" x = ailayer_{layer_init}_{dtype_str}_{implementation}(&dense_layer_1" +
", model.input_layer);\n")
# Create model structure for definition and init for remaining layers
counter = 1 # Counter to name the dense layers and activation layers according to their position in the ANN
prev_layer_dense = False
for i in range(2, len(layer_structure)):
el = layer_structure[i]
if el.layer_type == Layer.INPUT:
# Should never happen
raise ValueError("Input layer after initial layer. Something has gone wrong.")
if el.layer_type == Layer.LINEAR:
# Linear layer is pass through of values, therefore no additional layer is needed
continue
# Check if previous layer and current layer are dense, then we need to increase the counter, as linear
# activation layer was in between
if prev_layer_dense and el.layer_type not in act_layer:
counter += 1
# Create layer name
layer_name = "{name}_layer_{num}".format(name=el.layer_name, num=str(counter))
# Create definition of layer
aifes_fnn_layer_def += create_definition_of_layer(dtype, el, layer_name)
# Create init of layer depending on type and implementation, i.e. default or cmsis
if el.layer_type in act_layer:
implementation_loc = "default"
counter += 1
prev_layer_dense = False
else:
implementation_loc = implementation
prev_layer_dense = True
# Set layer name to init layer
layer_init = el.layer_name
# If transposed corresponding string needs to be added
if el.layer_type == Layer.DENSE_WT:
layer_init += "_wt"
# Set init of layer
aifes_fnn_layer_init += (f" x = ailayer_{layer_init}_{dtype_str}_{implementation_loc}(&" + layer_name + ", x);"
"\n")
# Set output of model
aifes_fnn_layer_init += " model.output_layer = x;"
return aifes_fnn_layer_def, aifes_fnn_layer_init
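# --- Illustrative sketch (example layer list only, not used by the library) ----
# Generates the definition and initialization text blocks for a small 2-4-1 F32
# network with the default backend; the layer classes are imported locally here
# because this module normally only receives ready-made AifesLayer objects.
def _example_create_model_structure():  # documentation-only helper
    from ..support.aifes_model import AifesLayer_Input, AifesLayer_Dense, AifesLayer_Sigmoid
    layer_structure = [
        AifesLayer_Input(2, 2),
        AifesLayer_Dense(4, 4), AifesLayer_Sigmoid(),
        AifesLayer_Dense(1, 1), AifesLayer_Sigmoid(),
    ]
    return aifes_create_model_structure(layer_structure, Dtype.FLOAT32, "default")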
def create_definition_of_layer(dtype: Dtype, curr_layer: AifesLayer, layer_name: str):
"""
Creates definition string of given layer for use in template file
:param dtype: Current Dtype, e.g. Dtype.FLOAT32, Dtype.Q7
:param curr_layer: Given Layer for which the definition should be created
:param layer_name: Layer name of given layer in ANN
:return: Definition string of given layer
"""
# Create dtype string
dtype_str = dtype_to_aifes[dtype]
# If current layer is configurable, the macro for definition needs to be set to that value if quantization is active
if curr_layer.layer_type in configurable_act_layer:
# When quantization is active, the alpha value needs to be converted
if dtype == Dtype.Q7:
alpha = curr_layer.alpha_value
if alpha > 0:
max = alpha
min = 0
else:
max = 0
min = alpha
try:
(shift, zero_point) = calc_q_params_q7(min, max)
except:
raise ValueError(f"During quantization of the alpha value {alpha} for the activation function "
f"{curr_layer.layer_name} an error occurred. Please adjust the alpha value so it can "
f"fit within Q7")
# Initialize place holder strings for init macro
q_start_init = "AISCALAR_Q7("
q_stop_init = "," + str(shift) + "," + str(zero_point) + ")"
else:
# No quantization, no place holder needed
q_start_init = ""
q_stop_init = ""
aifes_fnn_layer_def = ("ailayer_{name}_{dtype}_t" + "\t\t" + layer_name + "\t= " + curr_layer.init_macro +
"\n").format(name=curr_layer.layer_name, dtype=dtype_str, DTYPE_C=dtype_str.upper(),
Q_START_INIT=q_start_init, Q_STOP_INIT=q_stop_init)
else:
# No configurable activation layer, so no need to set parameters
aifes_fnn_layer_def = ("ailayer_{name}_{dtype}_t" + "\t\t" + layer_name + "\t= " + curr_layer.init_macro +
"\n").format(name=curr_layer.layer_name, dtype=dtype_str, DTYPE_C=dtype_str.upper())
return aifes_fnn_layer_def
def aifes_express_create_model_structure(aifes_layer_count: int, layer_structure: List[AifesLayer]):
"""
Creates the model structure for AIfES express as a text block for use in template file
:param aifes_layer_count: Number of AIfES layers, automatically calculated by the extractor's extractor_structure
:param layer_structure: Layer structure of current ANN, automatically calculated by the extractor's extractor_structure
:returns: Needed text blocks for use in template, replaces 'PLACEHOLDER_STRUCTURE' (aifes_fnn_structure) and
'PLACEHOLDER_ACTIVATIONS' (aifes_fnn_activations)
"""
# Create Variables for AIfES Express Header file
aifes_fnn_structure = [0] * aifes_layer_count
aifes_fnn_activations = ['AIfES_E_'] * (aifes_layer_count - 1)
# Set Input layer shape
aifes_fnn_structure[0] = layer_structure[0].input_shape
# Set the following layers to correct shape and activation, going through the layers with corresponding dense and
# activation layer
for i in range(1, aifes_layer_count):
# Get corresponding dense layer
if layer_structure[2 * i - 1].layer_type in [Layer.DENSE, Layer.DENSE_WT]:
aifes_fnn_structure[i] = layer_structure[2 * i - 1].input_shape
else:
raise ValueError("Layer " + str(i) + " contains no valid dense layer")
# Get the corresponding activation layer
if layer_structure[2 * i].layer_type in act_layer:
# Check if activation layer is leaky ReLU and check the corresponding alpha value
if layer_structure[2 * i].layer_type == Layer.LEAKY_RELU:
if not math.isclose(layer_structure[2 * i].alpha_value, 0.01, rel_tol=0.01):
raise ValueError("Alpha value of layer {i} for Leaky Relu isn't default value of 0.01 but "
"{alpha}. Please change the value to 0.01!".
format(i=i, alpha=layer_structure[2 * i].alpha_value))
# Check if activation layer is ELU and check the corresponding alpha value
if layer_structure[2 * i].layer_type == Layer.ELU:
if not math.isclose(layer_structure[2 * i].alpha_value, 1.0, rel_tol=0.01):
raise ValueError(
"Alpha value of layer {i} for ELU isn't default value of 1.0 but {alpha}. Please change "
"the value to 1.0!".format(i=i, alpha=layer_structure[2 * i].alpha_value))
# If everything is alright, add activation layer to activation structure
aifes_fnn_activations[i - 1] += layer_structure[2 * i].layer_name
else:
raise ValueError("Layer " + str(i) + " contains no valid activation function")
return aifes_fnn_structure, aifes_fnn_activations | AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/aifes_code_generator/support_aifes_code_creator.py | support_aifes_code_creator.py |
import os
from pkg_resources import resource_filename
from ..support.aifes_model import AifesType
from ..support.support_functions import flatten_weights, create_c_array_str
from ..model_converter.support_model_conversion_q7 import str_flatbuffer_c_style
from .support_aifes_code_creator import *
from ..model_converter.model_conversion import ModelConversion
import numpy as np
class AifesCodeGenerator:
"""Aifes Code Generator Class. Uses an AifesModel to create header files that represent this model. Header files can be used in any IDE."""
# Available implementations of dense layers
IMPLEMENTATIONS = ['default', 'cmsis']
# Available byteorder for Q7 quantization
BYTEORDER = ["little", "big"]
def __init__(self, extractor, implementation: str, aifestype: AifesType, destination_path: str, with_weights: bool):
"""
Initializes the AifesCodeGenerator class with common variables
:param extractor: Extractor class used for extraction of the AIfES Model from the corresponding framework
:param implementation: Which implementation should be used
:param aifestype: Which type of AIfES frontend (express, normal) should be used, is from type AifesType
:param destination_path: Destination path for the header files, is automatically created, if not already existent
:param with_weights: If weights should be extracted from the source model
"""
self._extractor = extractor
self._implementation = implementation
self._aifestype = aifestype
self._destination_path = destination_path
self._aifes_model = None
self._with_weights = with_weights
# Check destination path, if it doesn't exist, create it
if not os.path.isdir(self._destination_path):
os.mkdir(self._destination_path)
# Check parameters
if self._implementation not in self.IMPLEMENTATIONS:
raise ValueError("Unsupported implementation type. Got {implementation} but should be one of 'default' or"
" 'cmsis'".format(implementation=self._implementation))
def run_f32(self):
"""
Creates the header files for a f32 based implementation
"""
# Extract the AIfES structure
self._aifes_model = self._extractor.extractor_structure()
# Check which type of frontend should be used and create corresponding header file
if self._aifestype == AifesType.EXPRESS:
self._create_aifes_express_header_fnn_f32()
elif self._aifestype == AifesType.NORMAL:
self._create_aifes_header_fnn_f32()
else:
raise ValueError("Unsupported AIfES Frontend. Should be either 'EXPRESS' or 'NORMAL'")
# Check if weights should be extracted
if self._with_weights:
# Flatten the weights
self._aifes_model.flatten_aifes_weights = flatten_weights(self._extractor.extractor_values(),
self._extractor.get_transpose_status())
# Write the flatten weights into the right header file depending on AIfES Frontend
if self._aifestype == AifesType.EXPRESS:
self._create_aifes_express_weights_fnn_f32()
elif self._aifestype == AifesType.NORMAL:
self._create_aifes_weights_fnn_f32()
else:
raise ValueError("Unsupported AIfES Frontend. Should be either 'EXPRESS' or 'NORMAL'")
def run_q7(self, representative_data: np.ndarray, target_alignment: int, byteorder: str):
"""
Creates the header files for a Q7 implementation. For this, the weights of the given model are converted to Q7.
"""
# Check parameter
if byteorder not in self.BYTEORDER:
raise ValueError("Byteorder must be either 'little' or 'big. Got {byteorder}".format(byteorder=byteorder))
# Extract the AIfES structure
self._aifes_model = self._extractor.extractor_structure()
# Check dimension of representative data set and model input
num_input = representative_data.shape[1]
model_input = self._aifes_model.aifes_fnn_structure[0].input_shape
if num_input != model_input:
raise ValueError("The input dimension of the example data ({num_input}) doesn't match the input number of "
"the ANN ({model_input}).".format(num_input=num_input, model_input=model_input))
# Extract weights and bias of model
weights = self._extractor.extractor_values()
# Convert model to quantized model
model_converter = ModelConversion(self._aifes_model, representative_data, target_alignment, byteorder)
q_params_layers, q_params_weights_bias = model_converter.convert_to_q7(weights)
# Create header files depending on the chosen AIfES frontend
if self._aifestype == AifesType.EXPRESS:
self._create_aifes_express_header_fnn_q7()
self._create_aifes_express_weights_fnn_q7(target_alignment, byteorder)
elif self._aifestype == AifesType.NORMAL:
self._create_aifes_header_fnn_q7(q_params_layers)
self._create_aifes_weights_fnn_q7(target_alignment, byteorder)
else:
raise ValueError("Unsupported AIfES Frontend. Should be either 'EXPRESS' or 'NORMAL'")
def _create_aifes_express_header_fnn_f32(self):
"""
Creates the header file with the aifes model as aifes express function. Uses the template file aifes_e_f32_fnn.h.
Checks the init values of alpha from Leaky ReLU and ELU for compatibility with AIfES Express.
"""
# Create aifes structure and activation list
aifes_fnn_structure, aifes_fnn_activations = aifes_express_create_model_structure(
self._aifes_model.aifes_layer_count, self._aifes_model.aifes_fnn_structure)
if self._with_weights:
name_weights = '(void*)aifes_e_f32_flat_weights;'
else:
name_weights = '// Place your flattened layer weights here or give a pointer to them like: ' \
'(void*)aifes_e_f32_flat_weights'
# Edit the template config file with the current net config
checkWords = ("PLACEHOLDER_INPUTS", "PLACEHOLDER_OUTPUTS", "PLACEHOLDER_LAYERS", "PLACEHOLDER_STRUCTURE",
"PLACEHOLDER_ACTIVATIONS", "PLACEHOLDER_WEIGHTS")
repWords = (str(self._aifes_model.aifes_fnn_structure[0].input_shape),
str(self._aifes_model.aifes_fnn_structure[-2].output_shape),
str(self._aifes_model.aifes_layer_count), ', '.join(map(str, aifes_fnn_structure)),
', '.join(map(str, aifes_fnn_activations)), name_weights)
f_template = open(resource_filename(__name__, "templates/aifes_express/aifes_e_f32_fnn.h"), 'r')
f_destination = open(self._destination_path + "/aifes_e_f32_fnn.h", 'w')
for line in f_template:
for check, rep in zip(checkWords, repWords):
line = line.replace(check, rep)
f_destination.write(line)
f_template.close()
f_destination.close()
def _create_aifes_express_weights_fnn_f32(self):
"""
Creates a header file with flattened weights and bias for usage with aifes express.
Uses template aifes_e_f32_weights
"""
# Create Weights as string
weights = create_c_array_str(self._aifes_model.flatten_aifes_weights)
# Edit the template config file
checkWords = "PLACEHOLDER_WEIGHTS"
repWords = weights
f_template = open(resource_filename(__name__, "templates/aifes_express/aifes_e_f32_weights.h"), 'r')
f_destination = open(self._destination_path + "/aifes_e_f32_weights.h", 'w')
for line in f_template:
line = line.replace(checkWords, repWords)
f_destination.write(line)
f_template.close()
f_destination.close()
def _create_aifes_express_header_fnn_q7(self):
"""
Creates the header file with the aifes model as aifes express function. Uses the template file aifes_e_q7_fnn.h.
Checks the init values of alpha from Leaky ReLU and ELU for compatibility with AIfES Express.
"""
# Create aifes structure and activation list
aifes_fnn_structure, aifes_fnn_activations = aifes_express_create_model_structure(
self._aifes_model.aifes_layer_count, self._aifes_model.aifes_fnn_structure)
# Edit the template config file with the current net config
checkWords = ("PLACEHOLDER_INPUTS", "PLACEHOLDER_OUTPUTS", "PLACEHOLDER_LAYERS", "PLACEHOLDER_STRUCTURE",
"PLACEHOLDER_ACTIVATIONS")
repWords = (str(self._aifes_model.aifes_fnn_structure[0].input_shape),
str(self._aifes_model.aifes_fnn_structure[-2].output_shape),
str(self._aifes_model.aifes_layer_count), ', '.join(map(str, aifes_fnn_structure)),
', '.join(map(str, aifes_fnn_activations)))
f_template = open(resource_filename(__name__, "templates/aifes_express/aifes_e_q7_fnn.h"), 'r')
f_destination = open(self._destination_path + "/aifes_e_q7_fnn.h", 'w')
for line in f_template:
for check, rep in zip(checkWords, repWords):
line = line.replace(check, rep)
f_destination.write(line)
f_template.close()
f_destination.close()
def _create_aifes_express_weights_fnn_q7(self, alignment: int, byteorder: str):
"""
Writes flattened weights and biases to header file. Uses template aifes_q7_weights.h.
Expects converted q7 values!
"""
# Create Weights as string
weights = str_flatbuffer_c_style(self._aifes_model.flatten_aifes_weights, target_alignment=alignment,
byteorder=byteorder, mutable=False)
# Edit the template config file
checkWords = "PLACEHOLDER_WEIGHTS"
repWords = weights
f_template = open(resource_filename(__name__, "templates/aifes_express/aifes_e_q7_weights.h"), 'r')
f_destination = open(self._destination_path + "/aifes_e_q7_weights.h", 'w')
for line in f_template:
line = line.replace(checkWords, repWords)
f_destination.write(line)
f_template.close()
f_destination.close()
def _create_aifes_header_fnn_f32(self):
"""
        Creates the header file for AIfES (non-express version) using dtype f32. Uses the default or CMSIS
        implementation according to self._implementation. Uses template aifes_f32_fnn.h.
"""
# Create Variables for AIfES Header file
dtype = Dtype.FLOAT32
layer_structure = self._aifes_model.aifes_fnn_structure
# Create aifes layer definition and init text parts
aifes_fnn_layer_def, aifes_fnn_layer_init = aifes_create_model_structure(layer_structure, dtype, self._implementation)
if self._with_weights:
weights_name = 'uint32_t parameter_memory_size = aialgo_sizeof_parameter_memory(&model);\n\n ' \
'aialgo_distribute_parameter_memory(&model, (void*) aifes_f32_flat_weights, parameter_memory_size);'
else:
weights_name = '/* Initialize your AIfES model here. You can either use a pointer to your flatten array ' \
'like so: \n uint32_t parameter_memory_size = aialgo_sizeof_parameter_memory(&model);\n\n ' \
'aialgo_distribute_parameter_memory(&model, (void*) aifes_f32_flat_weights, parameter_memory_size);\n\n' \
' Or you define the weights and biases per layer. For this you can update the layer initialization, e.g.:\n\n' \
' // Use constant data only for inference. For training remove the const qualifier!!\n ' \
'const float weights_data_dense[] = {-10.1164f, 7.297f, -8.4212f, -7.6482f, 5.4396f, ' \
'-9.0155f};\n const float bias_data_dense[] = {-2.9653f, 2.3677f, -1.5968f};\n ' \
'ailayer_dense_f32_t dense_layer_1 = AILAYER_DENSE_F32_M(3, weights_data_dense, bias_data_dense);\n\n' \
' Alternatively you can set the weights and biases of the layers directly.' \
'\n see https://create.arduino.cc/projecthub/aifes_team/aifes-inference-tutorial-f44d96 for more details*/'
if self._implementation == 'cmsis':
cmsis_include = '\n#include <aifes_cmsis.h>'
elif self._implementation == 'default':
cmsis_include = ''
else:
raise ValueError("Undefined implementation of {}. Must be either 'cmsis' or default".format(implementation))
# Edit the template config file with the current net config
checkWords = ("PLACEHOLDER_INPUTS", "PLACEHOLDER_OUTPUTS", "PLACEHOLDER_LAYER_DEF", "PLACEHOLDER_LAYER_INIT",
"PLACEHOLDER_WEIGHTS", "PLACEHOLDER_CMSIS_INCLUDE")
repWords = (str(self._aifes_model.aifes_fnn_structure[0].input_shape),
str(self._aifes_model.aifes_fnn_structure[-2].output_shape),
aifes_fnn_layer_def,
aifes_fnn_layer_init,
weights_name,
cmsis_include)
f_template = open(resource_filename(__name__, "templates/aifes/aifes_f32_fnn.h"), 'r')
f_destination = open(self._destination_path + "/aifes_f32_fnn.h", 'w')
for line in f_template:
for check, rep in zip(checkWords, repWords):
line = line.replace(check, rep)
f_destination.write(line)
f_template.close()
f_destination.close()
def _create_aifes_weights_fnn_f32(self):
"""
Writes flattened weights and biases to header file. Uses template aifes_f32_weights.h
"""
# Create Weights as string
weights = create_c_array_str(self._aifes_model.flatten_aifes_weights)
# Edit the template config file
checkWords = "PLACEHOLDER_WEIGHTS"
repWords = weights
f_template = open(resource_filename(__name__, "templates/aifes/aifes_f32_weights.h"), 'r')
f_destination = open(self._destination_path + "/aifes_f32_weights.h", 'w')
for line in f_template:
line = line.replace(checkWords, repWords)
f_destination.write(line)
f_template.close()
f_destination.close()
def _create_aifes_header_fnn_q7(self, q_params_layers: list):
"""
Creates header file for AIfES (non-express version). Uses template aifes_q7_fnn.h.
Uses dtype Q7.
        :param q_params_layers: Per-layer quantization parameters (shift, zero point) as returned by ModelConversion.convert_to_q7
"""
# Create Variables for AIfES Header file
layer_structure = self._aifes_model.aifes_fnn_structure
dtype = Dtype.Q7
dtype_str = dtype_to_aifes[dtype]
# Create aifes layer definition and init text parts
aifes_fnn_layer_def, aifes_fnn_layer_init = aifes_create_model_structure(layer_structure, dtype, self._implementation)
if self._implementation == 'cmsis':
cmsis_include = '\n#include <aifes_cmsis.h>'
elif self._implementation == 'default':
cmsis_include = ''
else:
raise ValueError("Undefined implementation of {}. Must be either 'cmsis' or default".format(self._implementation))
# Edit the template config file with the current net config
checkWords = ("PLACEHOLDER_INPUTS", "PLACEHOLDER_OUTPUTS", "PLACEHOLDER_LAYER_DEF", "PLACEHOLDER_LAYER_INIT",
"PLACEHOLDER_INPUT_SHIFT", "PLACEHOLDER_INPUT_ZERO", "PLACEHOLDER_OUTPUT_SHIFT",
"PLACEHOLDER_OUTPUT_ZERO", "PLACEHOLDER_CMSIS_INCLUDE")
repWords = (str(self._aifes_model.aifes_fnn_structure[0].input_shape),
str(self._aifes_model.aifes_fnn_structure[-2].output_shape),
aifes_fnn_layer_def,
aifes_fnn_layer_init,
str(q_params_layers[0][0]), str(q_params_layers[0][1]),
str(q_params_layers[-1][0]), str(q_params_layers[-1][1]),
cmsis_include)
f_template = open(resource_filename(__name__, "templates/aifes/aifes_q7_fnn.h"), 'r')
f_destination = open(self._destination_path + "/aifes_q7_fnn.h".format(DTYPE=dtype_str), 'w')
for line in f_template:
for check, rep in zip(checkWords, repWords):
line = line.replace(check, rep)
f_destination.write(line)
f_template.close()
f_destination.close()
def _create_aifes_weights_fnn_q7(self, alignment: int, byteorder: str):
"""
Writes flattened weights and biases to header file. Uses template aifes_q7_weights.h
Uses dtype Q7. Needs converted weights to q7!
"""
# Create Weights as string
weights = str_flatbuffer_c_style(self._aifes_model.flatten_aifes_weights, target_alignment=alignment,
byteorder=byteorder, mutable=False)
# Edit the template config file
checkWords = "PLACEHOLDER_WEIGHTS"
repWords = weights
f_template = open(resource_filename(__name__, "templates/aifes/aifes_q7_weights.h"), 'r')
f_destination = open(self._destination_path + "/aifes_q7_weights.h", 'w')
for line in f_template:
line = line.replace(checkWords, repWords)
f_destination.write(line)
f_template.close()
        f_destination.close()
| AIfES-Converter | /AIfES_Converter-1.0.0-py3-none-any.whl/aifes/aifes_code_generator/aifes_code_creator.py | aifes_code_creator.py |
# AJAS
[](https://travis-ci.org/swip3798/AJAS)
[](https://www.gnu.org/licenses/lgpl-3.0)
[]()
## Description
Another JSON API Server. This is a library for Python 3 for creating fast JSON REST APIs. It uses Flask and gevent in the background.
## Usage
A simple start:
```python
from AJAS import Api
def simple_resolver(header, query):
return {"hello":"world"}
api = Api()
api.add_get_resolver("/hello", simple_resolver)
api.add_post_resolver("/hello", simple_resolver)
api.run("localhost", 8080)
```
This will run a web server returning `'{"hello":"world"}'` at the address `http://localhost:8080/hello` for both GET and POST requests.
### Using blocks
Using blocks allows you to separate different parts of your API. This can be used for versioning. Also, blocks can hold an authenticator, which can accept or deny a request based on the header and query of the request.
```python
from AJAS import Api, Block, Authenticator
class MyAuth(Authenticator):
def authenticate(self, header, query):
if someCheck() == True:
return True
else:
return False
def simple_resolver(header, query):
return {"hello":"world"}
api = Api()
v1 = Block("/v1")
v1.add_get_resolver("/hello", resolver)
api.add_block(v1)
api.run("localhost", 8080)
```
| AJAS | /AJAS-0.2.3.tar.gz/AJAS-0.2.3/README.md | README.md |
try:
import matplotlib.pyplot as plt
import numpy as np
except ImportError as error:
print(f"Error, Module {error.__class__.__name__} is required")
def Graph2D(x_list,y_list,_description_list):
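    """Plots y_list against x_list as a single 2D line plot;
    _description_list holds [title, x-axis label, y-axis label]."""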
plt.style.use("default")
plt.figure(figsize=(6,6))
plt.plot(x_list,y_list)
plt.title(_description_list[0])
plt.xlabel(_description_list[1])
plt.ylabel(_description_list[2])
plt.tight_layout()
plt.show()
def Graph2DS2(x_list1,y_list1,x_list2,y_list2,_description_list,_description_list2):
plt.figure(figsize=(8,3))
plt.subplot(1,2,1)
plt.plot(x_list1,y_list1)
plt.title(_description_list[0])
plt.xlabel(_description_list[1])
plt.ylabel(_description_list[2])
plt.subplot(1,2,2)
plt.plot(x_list2,y_list2)
plt.title(_description_list2[0])
plt.xlabel(_description_list2[1])
plt.ylabel(_description_list2[2])
plt.tight_layout()
plt.show()
def Graph2DS3(x_list1,y_list1,x_list2,y_list2,x_list3,y_list3,_description_list,_description_list2,_description_list3):
plt.figure(figsize=(12,3))
plt.subplot(1,3,1)
plt.plot(x_list1,y_list1)
plt.title(_description_list[0])
plt.xlabel(_description_list[1])
plt.ylabel(_description_list[2])
plt.subplot(1,3,2)
plt.plot(x_list2,y_list2)
plt.title(_description_list2[0])
plt.xlabel(_description_list2[1])
plt.ylabel(_description_list2[2])
plt.subplot(1,3,3)
plt.plot(x_list3,y_list3)
plt.title(_description_list3[0])
plt.xlabel(_description_list3[1])
plt.ylabel(_description_list3[2])
plt.tight_layout()
plt.show()
def Graph2DVector(x_list,y_list,_description_list):
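    """Plots the (x_list, y_list) vector components as a normalized quiver (vector field) plot;
    _description_list holds [title, x-axis label, y-axis label]."""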
x = np.linspace(min(x_list),max(x_list),len(x_list))
y = np.linspace(min(y_list),max(y_list),len(y_list))
X,Y = np.meshgrid(x,y)
    x_list = np.asarray(x_list)
    y_list = np.asarray(y_list)
mag = np.sqrt(x_list**2+y_list**2)
x_list_unit = x_list/mag
y_list_unit = y_list/mag
plt.figure(figsize=(6,6))
plt.quiver(X,Y,x_list_unit,y_list_unit)
plt.title(_description_list[0])
plt.xlabel(_description_list[1])
plt.ylabel(_description_list[2])
plt.tight_layout()
plt.show()
if __name__ == "main":
print("This program is not meant to run as a main file") | AJWorkFlow | /AJWorkFlow-1.0.1-py3-none-any.whl/AJWorkFlow_pkg/Display/Graphs2D.py | Graphs2D.py |
import numpy as np
import logging
from tqdm import tqdm  # tqdm shows a progress bar on the terminal while training runs,
# so we don't necessarily need to open our log file for progress info; we can see it on the terminal.
# It can wrap any for loop in our program.
class Perceptron:
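    """Single-layer perceptron trained with the perceptron learning rule.

    Minimal usage sketch (hypothetical data, for illustration only):
        >>> import numpy as np
        >>> X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        >>> y = np.array([0, 1, 1, 1])  # OR gate
        >>> model = Perceptron(eta=0.3, epochs=10)
        >>> model.fit(X, y)
        >>> model.predict(X)
    """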
def __init__(self, eta, epochs):
        # Initialize the weights. We draw 3 random values for (w1, w2, w0) because we use 2 features, X1 and X2,
        # plus a bias weight w0, which makes a total of 3 weights.
        # We also multiply by 1e-4 so the initial weights are very small.
self.weights = np.random.randn(3) * 1e-4 # SMALL WEIGHT INIT
logging.info(f"initial weights before training: \n{self.weights}")
self.eta = eta # LEARNING RATE
self.epochs = epochs
def activationFunction(self, inputs, weights):
z = np.dot(inputs, weights) # z = W * X
return np.where(z > 0, 1, 0) # CONDITION, IF TRUE, ELSE. If Z > 0 then Z = 1. ELSE 0
def fit(self, X, y):
self.X = X
self.y = y
        # This just generates a column of bias inputs. The bias input here is -1:
        # -np.ones((len(X), 1)) -> [[-1], [-1], [-1], [-1]]
X_with_bias = np.c_[self.X, -np.ones((len(self.X), 1))] # CONCATINATION
logging.info(f"X with bias: \n{X_with_bias}")
        # This is our training loop
for epoch in tqdm(range(self.epochs), total = self.epochs, desc = "Training the model"):
logging.info("--"*10) #Don't be confuse here. This just logging.info many dashes for you time 10. For egs: -----------------------------
logging.info(f"for epoch: {epoch}")
logging.info("--"*10)
            y_hat = self.activationFunction(X_with_bias, self.weights)  # forward propagation
logging.info(f"predicted value after forward pass: \n{y_hat}")
self.error = self.y - y_hat
logging.info(f"error: \n{self.error}")
self.weights = self.weights + self.eta * np.dot(X_with_bias.T, self.error) # backward propagation
logging.info(f"updated weights after epoch:\n{epoch}/{self.epochs} : \n{self.weights}")
logging.info("#####"*10)
def predict(self, X):
X_with_bias = np.c_[X, -np.ones((len(X), 1))]
return self.activationFunction(X_with_bias, self.weights)
def total_loss(self):
total_loss = np.sum(self.error)
logging.info(f"total loss: {total_loss}")
        return total_loss
| AK-Repo-pypi-midofemi | /AK_Repo_pypi_midofemi-0.0.3-py3-none-any.whl/AK_Repo_pypi/perceptron.py | perceptron.py |
import os
import torch
from torch import nn
from tqdm.auto import tqdm
from datetime import datetime
from torch.utils.data import Subset
from torch.utils.tensorboard import SummaryWriter
from AK_SSL.models import *
from AK_SSL.models.modules.losses import *
from AK_SSL.models.modules.transformations import *
class Trainer:
def __init__(
self,
method: str,
backbone: nn.Module,
feature_size: int,
dataset: torch.utils.data.Dataset,
image_size: int,
save_dir: str = ".",
checkpoint_interval: int = 10,
reload_checkpoint: bool = False,
**kwargs,
):
"""
Description:
Trainer class to train the model with self-supervised methods.
Args:
method (str): Self-supervised method to use. Options: [BarlowTwins, BYOL, DINO, MoCov2, MoCov3, Rotation, SimCLR, SimSiam, SwAV, VICReg]
backbone (nn.Module): Backbone to use.
feature_size (int): Feature size.
dataset (torch.utils.data.Dataset): Dataset to use.
image_size (int): Image size.
save_dir (str): Directory to save the model.
checkpoint_interval (int): Interval to save the model.
reload_checkpoint (bool): Whether to reload the checkpoint.
**kwargs: Keyword arguments.
"""
self.method = method
self.dataset = dataset
self.image_size = image_size
self.backbone = backbone
self.feature_size = feature_size
self.reload_checkpoint = reload_checkpoint
self.checkpoint_interval = checkpoint_interval
self.save_dir = save_dir + f"/{self.method}/"
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
self.checkpoint_path = self.save_dir + "Pretext/"
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.num_workers = os.cpu_count()
print("----------------AK_SSL----------------")
print("Number of workers:", self.num_workers)
print("Device:", self.device)
print("--------------------------------------")
print(f"Method: {self.method}")
match self.method.lower():
case "barlowtwins":
self.model = BarlowTwins(
self.backbone,
self.feature_size,
hidden_dim=self.feature_size,
**kwargs,
)
self.loss = BarlowTwinsLoss(**kwargs)
self.transformation = SimCLRViewTransform(
image_size=self.image_size, **kwargs
)
self.transformation_prime = self.transformation
print(f"Projection Dimension: {self.model.projection_dim}")
print(f"Projection Hidden Dimension: {self.model.hidden_dim}")
print("Loss: BarlowTwins Loss")
print("Transformation: SimCLRViewTransform")
print("Transformation prime: SimCLRViewTransform")
case "byol":
self.model = BYOL(self.backbone, self.feature_size, **kwargs)
self.transformation = SimCLRViewTransform(
image_size=self.image_size, **kwargs
)
self.transformation_prime = self.transformation
self.loss = BYOLLoss()
print(f"Projection Dimension: {self.model.projection_dim}")
print(f"Projection Hidden Dimension: {self.model.hidden_dim}")
print(f"Moving average decay: {self.model.moving_average_decay}")
print("Loss: BYOL Loss")
print("Transformation: SimCLRViewTransform")
print("Transformation prime: SimCLRViewTransform")
case "dino":
self.model = DINO(self.backbone, self.feature_size, **kwargs)
self.loss = DINOLoss(
self.model.projection_dim,
self.model.temp_student,
self.model.temp_teacher,
)
self.transformation_global1 = SimCLRViewTransform(
image_size=self.image_size, **kwargs
)
self.transformation_global2 = self.transformation_global1
self.transformation_local = self.transformation_global1
print(f"Projection Dimension: {self.model.projection_dim}")
print(f"Projection Hidden Dimension: {self.model.hidden_dim}")
print(f"Bottleneck Dimension: {self.model.projection_dim}")
print(f"Student Temp: {self.model.temp_student}")
print(f"Teacher Temp: {self.model.temp_teacher}")
print(f"Last layer noramlization: {self.model.norm_last_layer}")
print(f"Center Momentum: {self.loss.center_momentum}")
print(f"Teacher Momentum: {self.model.momentum_teacher}")
print(f"Number of crops: {self.model.num_crops}")
print(
f"Using batch normalization in projection head: {self.model.use_bn_in_head}"
)
print("Loss: DINO Loss")
print("Transformation global_1: SimCLRViewTransform")
print("Transformation global_2: SimCLRViewTransform")
print("Transformation local: SimCLRViewTransform")
case "mocov2":
self.model = MoCoV2(self.backbone, self.feature_size, **kwargs)
self.loss = nn.CrossEntropyLoss()
self.transformation = SimCLRViewTransform(
image_size=self.image_size, **kwargs
)
print(f"Projection Dimension: {self.model.projection_dim}")
print(f"Number of negative keys: {self.model.K}")
print(f"Momentum for updating the key encoder: {self.model.m}")
print("Loss: InfoNCE Loss")
print("Transformation: SimCLRViewTransform")
case "mocov3":
self.model = MoCov3(self.backbone, self.feature_size, **kwargs)
self.loss = InfoNCE_MoCoV3(**kwargs)
self.transformation = SimCLRViewTransform(
image_size=self.image_size, **kwargs
)
self.transformation_prime = self.transformation
print(f"Projection Dimension: {self.model.projection_dim}")
print(f"Projection Hidden Dimension: {self.model.hidden_dim}")
print(f"Moving average decay: {self.model.moving_average_decay}")
print("Loss: InfoNCE Loss")
print("Transformation: SimCLRViewTransform")
print("Transformation prime: SimCLRViewTransform")
case "rotation":
pass
case "simclr":
self.model = SimCLR(self.backbone, self.feature_size, **kwargs)
self.loss = NT_Xent(**kwargs)
self.transformation = SimCLRViewTransform(
image_size=self.image_size, **kwargs
)
print(f"Projection Dimension: {self.model.projection_dim}")
print(
f"Projection number of layers: {self.model.projection_num_layers}"
)
print(
f"Projection batch normalization: {self.model.projection_batch_norm}"
)
print("Loss: NT_Xent Loss")
print("Transformation: SimCLRViewTransform")
case "simsiam":
self.model = SimSiam(
self.backbone,
self.feature_size,
projection_hidden_dim=self.feature_size,
prediction_hidden_dim=self.feature_size // 4,
**kwargs,
)
self.loss = NegativeCosineSimilarity(**kwargs)
self.transformation = SimCLRViewTransform(
image_size=self.image_size, **kwargs
)
print(f"Projection Dimension: {self.model.projection_dim}")
print(
f"Projection Hidden Dimension: {self.model.projection_hidden_dim}"
)
print(
f"Prediction Hidden Dimension: {self.model.prediction_hidden_dim}"
)
print("Loss: Negative Cosine Simililarity")
print("Transformation: SimCLRViewTransform")
case "swav":
self.model = SwAV(self.backbone, self.feature_size, **kwargs)
self.loss = SwAVLoss(self.model.num_crops + 2, **kwargs)
self.transformation_global = SimCLRViewTransform(
                    image_size=self.image_size, **kwargs
)
self.transformation_local = SimCLRViewTransform(
image_size=self.image_size, **kwargs
)
print(f"Projection Dimension: {self.model.projection_dim}")
print(f"Projection Hidden Dimension: {self.model.hidden_dim}")
print(f"Number of crops: {self.model.num_crops}")
print("Loss: SwAV Loss")
print("Transformation global: SimCLRViewTransform")
print("Transformation local: SimCLRViewTransform")
case "vicreg":
pass
case _:
raise Exception("Method not found.")
print("--------------------------------------")
print(self.dataset)
print("--------------------------------------")
self.model = self.model.to(self.device)
self.loss = self.loss.to(self.device)
self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
self.writer = SummaryWriter("{}/Logs/{}".format(self.save_dir, self.timestamp))
def get_backbone(self):
return self.model.backbone
def train_one_epoch(self, tepoch, optimizer):
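        """
        Runs one epoch of self-supervised training over the given tqdm-wrapped loader,
        generating two (or more) augmented views per batch depending on the method,
        stepping the optimizer, and returning the accumulated loss over the epoch.
        """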
loss_hist_train = 0.0
for images, _ in tepoch:
images = images.to(self.device)
if self.method.lower() in ["barlowtwins", "byol", "mocov3"]:
view0 = self.transformation(images)
view1 = self.transformation_prime(images)
z0, z1 = self.model(view0, view1)
loss = self.loss(z0, z1)
elif self.method.lower() in ["dino"]:
view0 = self.transformation_global1(images)
view1 = self.transformation_global2(images)
viewc = []
if self.model.num_crops > 0:
for _ in range(self.model.num_crops):
viewc.append(self.transformation_local(images))
z0, z1 = self.model(view0, view1, viewc)
loss = self.loss(z0, z1)
elif self.method.lower() in ["swav"]:
view0 = self.transformation_global(images)
view1 = self.transformation_global(images)
viewc = []
if self.model.num_crops > 0:
for _ in range(self.model.num_crops):
viewc.append(self.transformation_local(images))
z0, z1 = self.model(view0, view1, viewc)
loss = self.loss(z0, z1)
else:
view0 = self.transformation(images)
view1 = self.transformation(images)
z0, z1 = self.model(view0, view1)
loss = self.loss(z0, z1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_hist_train += loss.item()
tepoch.set_postfix(loss=loss.item())
return loss_hist_train
def train(
self,
batch_size: int = 256,
start_epoch: int = 1,
epochs: int = 100,
optimizer: str = "Adam",
weight_decay: float = 1e-6,
learning_rate: float = 1e-3,
):
"""
Description:
Train the model.
Args:
batch_size (int): Batch size.
start_epoch (int): Epoch to start the training.
epochs (int): Number of epochs.
optimizer (str): Optimizer to train the model. Options: [Adam, SGD]
weight_decay (float): Weight decay.
learning_rate (float): Learning rate.
"""
match optimizer.lower():
case "adam":
optimizer = torch.optim.Adam(
self.model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
case "sgd":
optimizer = torch.optim.SGD(
self.model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
case "adamw":
optimizer = torch.optim.AdamW(
self.model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
case _:
raise Exception("Optimizer not found.")
train_loader = torch.utils.data.DataLoader(
self.dataset, batch_size=batch_size, shuffle=True, drop_last=True
)
self.model.train(True)
if self.reload_checkpoint:
start_epoch = self.reload_latest_checkpoint()
for epoch in tqdm(
range(start_epoch - 1, epochs),
unit="epoch",
desc="Pretext Task Model Training",
leave=True,
):
with tqdm(train_loader, unit="batch", leave=False) as tepoch:
tepoch.set_description(f"Epoch {epoch + 1}")
loss_per_epoch = self.train_one_epoch(tepoch, optimizer)
self.writer.add_scalar(
"Pretext Task/Loss/train",
loss_per_epoch / len(train_loader),
epoch + 1,
)
self.writer.flush()
if (epoch + 1) % self.checkpoint_interval == 0:
model_path = self.checkpoint_path + "SimCLR_model_{}_epoch{}".format(
self.timestamp, epoch + 1
)
torch.save(self.model.state_dict(), model_path)
model_path = self.checkpoint_path + "SimCLR_model_{}_epoch{}".format(
self.timestamp, epoch + 1
)
torch.save(self.model.state_dict(), model_path)
def evaluate(
self,
dataset_train: torch.utils.data.Dataset,
dataset_test: torch.utils.data.Dataset,
eval_method: str = "linear",
top_k: int = 1,
epochs: int = 100,
optimizer: str = "Adam",
weight_decay: float = 1e-6,
learning_rate: float = 1e-3,
batch_size: int = 256,
fine_tuning_data_proportion: float = 1,
):
"""
Description:
Evaluate the model using the given evaluating method.
Args:
eval_method (str): Evaluation method. Options: [linear, finetune]
top_k (int): Top k accuracy.
epochs (int): Number of epochs.
optimizer (str): Optimizer to train the model. Options: [Adam, SGD]
weight_decay (float): Weight decay.
learning_rate (float): Learning rate.
batch_size (int): Batch size.
dataset_train (torch.utils.data.Dataset): Dataset to train the downstream model.
dataset_test (torch.utils.data.Dataset): Dataset to test the downstream model.
fine_tuning_data_proportion (float): Proportion of the dataset between 0 and 1 to use for fine-tuning.
"""
match eval_method.lower():
case "linear":
net = EvaluateNet(
self.model.backbone,
self.feature_size,
len(dataset_train.classes),
True,
)
case "finetune":
net = EvaluateNet(
self.model.backbone,
self.feature_size,
len(dataset_train.classes),
False,
)
num_samples = len(dataset_train)
subset_size = int(num_samples * fine_tuning_data_proportion)
indices = torch.randperm(num_samples)[:subset_size]
dataset_train = Subset(dataset_train, indices)
match optimizer.lower():
case "adam":
optimizer_eval = torch.optim.Adam(
net.parameters(), lr=learning_rate, weight_decay=weight_decay
)
case "sgd":
optimizer_eval = torch.optim.SGD(
net.parameters(), lr=learning_rate, weight_decay=weight_decay
)
case "adamw":
optimizer_eval = torch.optim.AdamW(
                    net.parameters(), lr=learning_rate, weight_decay=weight_decay
)
case _:
raise Exception("Optimizer not found.")
net = net.to(self.device)
criterion = nn.CrossEntropyLoss()
train_loader_ds = torch.utils.data.DataLoader(
dataset_train, batch_size=batch_size, shuffle=True
)
net.train(True)
for epoch in tqdm(
range(epochs),
unit="epoch",
desc="Evaluate Model Training",
leave=True,
):
with tqdm(train_loader_ds, unit="batch", leave=False) as tepoch_ds:
tepoch_ds.set_description(f"Epoch {epoch + 1}")
loss_hist_train, acc_hist_train = 0.0, 0.0
for images, labels in tepoch_ds:
correct, total = 0, 0
images = images.to(self.device)
labels = labels.to(self.device)
# zero the parameter gradients
optimizer_eval.zero_grad()
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
acc_hist_train += acc
# compute loss
loss = criterion(outputs, labels)
tepoch_ds.set_postfix(loss=loss.item(), accuracy=f"{acc:.2f}")
loss_hist_train += loss.item()
loss.backward()
optimizer_eval.step()
self.writer.add_scalar(
"Downstream Task/Loss/train",
loss_hist_train / len(train_loader_ds),
epoch + 1,
)
self.writer.add_scalar(
"Downstream Task/Accuracy/train",
acc_hist_train / len(train_loader_ds),
epoch + 1,
)
self.writer.flush()
test_loader_ds = torch.utils.data.DataLoader(
dataset_test, batch_size=batch_size, shuffle=True
)
correct = 0
total = 0
net.eval()
with torch.no_grad():
for images, labels in tqdm(test_loader_ds, unit="batch"):
images = images.to(self.device)
labels = labels.to(self.device)
outputs = net(images)
_, top = torch.topk(outputs.data, k=top_k, dim=1)
correct_predictions = torch.eq(labels[:, None], top).any(dim=1)
total += labels.size(0)
correct += correct_predictions.sum().item()
print(
f"The top_{top_k} accuracy of the network on the {len(dataset_test)} test images: {(100 * correct / total)}%"
)
self.writer.close()
    def load_checkpoint(self, checkpoint_dir: str):
        self.model.load_state_dict(torch.load(checkpoint_dir))
print("Checkpoint loaded.")
def save_backbone(self):
torch.save(self.model.backbone.state_dict(), self.save_dir + "backbone.pth")
print("Backbone saved.")
    def reload_latest_checkpoint(self):
        checkpoints = os.listdir(self.checkpoint_path)
        if len(checkpoints) == 0:
            raise Exception("No checkpoints found.")
        checkpoints.sort(
            key=lambda f: os.path.getmtime(os.path.join(self.checkpoint_path, f))
        )
        self.load_checkpoint(self.checkpoint_path + checkpoints[-1])
        return int(checkpoints[-1].split("epoch")[-1]) + 1
| AK-SSL | /AK_SSL-0.0.1-py3-none-any.whl/AK_SSL/Trainer.py | Trainer.py |
import torch
import copy
import torch.nn as nn
from AK_SSL.models.modules.heads import SwAVProjectionHead
class SwAV(nn.Module):
"""
SwAV: Unsupervised Learning of Visual Features by Contrasting Cluster Assignments
Link: https://arxiv.org/abs/2006.09882
Implementation: https://github.com/facebookresearch/swav
"""
def __init__(
self,
backbone: nn.Module,
feature_size: int,
projection_dim: int = 128,
hidden_dim: int = 2048,
epsilon: float = 0.05,
sinkhorn_iterations: int = 3,
num_prototypes: int = 3000,
queue_length: int = 64,
use_the_queue: int = True,
num_crops: int = 6,
):
super().__init__()
self.backbone = backbone
self.feature_size = feature_size
self.projection_dim = projection_dim
self.hidden_dim = hidden_dim
self.epsilon = epsilon
self.sinkhorn_iterations = sinkhorn_iterations
self.num_prototypes = num_prototypes
self.queue_length = queue_length
self.use_the_queue = use_the_queue
self.num_crops = num_crops
self.register_buffer(
"queue", torch.zeros(2, self.queue_length, self.projection_dim)
)
self.projection_head = SwAVProjectionHead(
feature_size, hidden_dim, projection_dim
)
self.encoder = nn.Sequential(self.backbone, self.projection_head)
self.prototypes = nn.Linear(
self.projection_dim, self.num_prototypes, bias=False
)
self._init_weights()
@torch.no_grad()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@torch.no_grad()
def sinkhorn(self, Q):
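        """
        Sinkhorn-Knopp normalization as used in SwAV: iteratively rescales rows and
        columns of the exponentiated score matrix so the resulting soft cluster
        assignments are balanced across prototypes (equipartition constraint).
        """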
with torch.no_grad():
Q = torch.exp(Q / self.epsilon).t()
B = Q.shape[1]
K = Q.shape[0]
sum_Q = torch.sum(Q)
Q /= sum_Q
for _ in range(self.sinkhorn_iterations):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
def forward(self, x0: torch.Tensor, x1: torch.Tensor, xc: list):
bz = x0.shape[0]
with torch.no_grad(): # normalize prototypes
w = self.prototypes.weight.data.clone()
w = nn.functional.normalize(w, dim=1, p=2)
self.prototypes.weight.copy_(w)
        z1, z2 = self.encoder(x0), self.encoder(x1)
z1, z2 = nn.functional.normalize(z1, dim=1, p=2), nn.functional.normalize(
z2, dim=1, p=2
)
z1, z2 = z1.detach(), z2.detach()
c1, c2 = self.prototypes(z1), self.prototypes(z2)
_c1, _c2 = c1.detach(), c2.detach()
with torch.no_grad():
if self.queue is not None:
if self.use_the_queue:
_c1 = torch.cat(
(torch.mm(self.queue[0], self.prototypes.weight.t()), _c1)
)
_c2 = torch.cat(
(torch.mm(self.queue[1], self.prototypes.weight.t()), _c2)
)
self.queue[0, bz:] = self.queue[0, :-bz].clone()
self.queue[0, :bz] = z1
self.queue[1, bz:] = self.queue[1, :-bz].clone()
self.queue[1, :bz] = z2
q1, q2 = self.sinkhorn(_c1)[:bz, :], self.sinkhorn(_c2)[:bz, :]
z_c, c_c = [], []
for x in xc:
z = self.encoder(x)
z = nn.functional.normalize(z, dim=1, p=2)
z = z.detach()
z_c.append(z)
c_c.append(self.prototypes(z))
        return (c1, c2, c_c), (q1, q2)
| AK_SSL | /AK_SSL-0.0.1-py3-none-any.whl/AK_SSL/models/swav.py | swav.py |
import torch
import copy
import torch.nn as nn
from AK_SSL.models.modules.heads import DINOProjectionHead
class DINO(nn.Module):
"""
DINO: Emerging Properties in Self-Supervised Vision Transformers
Link: https://arxiv.org/abs/2104.14294
Implementation: https://github.com/facebookresearch/dino
"""
def __init__(
self,
backbone: nn.Module,
feature_size: int,
projection_dim: int = 256,
hidden_dim: int = 2048,
bottleneck_dim: int = 256,
temp_student: float = 0.1,
temp_teacher: float = 0.5,
norm_last_layer: bool = True,
momentum_teacher: float = 0.996,
num_crops: int = 6,
use_bn_in_head: bool = False,
**kwargs,
):
super().__init__()
self.backbone = backbone
self.feature_size = feature_size
self.projection_dim = projection_dim
self.hidden_dim = hidden_dim
self.bottleneck_dim = bottleneck_dim
self.temp_student = temp_student
self.temp_teacher = temp_teacher
self.norm_last_layer = norm_last_layer
self.use_bn_in_head = use_bn_in_head
self.momentum_teacher = momentum_teacher # EMA update
self.num_crops = num_crops
self.student_projection_head = DINOProjectionHead(
input_dim=self.feature_size,
hidden_dim=self.hidden_dim,
output_dim=self.projection_dim,
bottleneck_dim=self.bottleneck_dim,
use_bn=self.use_bn_in_head,
norm_last_layer=self.norm_last_layer,
**kwargs,
)
self.student = self.encoder = nn.Sequential(
self.backbone, self.student_projection_head
)
self.teacher_projection_head = DINOProjectionHead(
input_dim=self.feature_size,
hidden_dim=self.hidden_dim,
output_dim=self.projection_dim,
bottleneck_dim=self.bottleneck_dim,
use_bn=self.use_bn_in_head,
norm_last_layer=self.norm_last_layer,
**kwargs,
)
self.teacher = nn.Sequential(
copy.deepcopy(self.backbone), self.teacher_projection_head
)
self._init_teacher()
def _init_teacher(self):
for param_q, param_k in zip(
self.student.parameters(), self.teacher.parameters()
):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
@torch.no_grad()
def _momentum_update_teacher(self):
for param_q, param_k in zip(
self.student.parameters(), self.teacher.parameters()
):
param_k.data = (
self.momentum_teacher * param_k.data
+ (1.0 - self.momentum_teacher) * param_q.data
)
def forward(self, x0: torch.Tensor, x1: torch.Tensor, xc: list):
z1_s, z2_s = self.student(x0), self.student(x1)
zc_s = []
for x in xc:
zc_s.append(self.student(x))
with torch.no_grad():
self._momentum_update_teacher()
z1_t, z2_t = self.teacher(x0), self.teacher(x1)
z_s = [z1_s, z2_s] + zc_s
z_t = [z1_t, z2_t]
        return z_s, z_t
| AK_SSL | /AK_SSL-0.0.1-py3-none-any.whl/AK_SSL/models/dino.py | dino.py |
import torch
import copy
import torch.nn as nn
from AK_SSL.models.modules.heads import BYOLPredictionHead, BYOLProjectionHead
class BYOL(nn.Module):
"""
BYOL: Bootstrap your own latent: A new approach to self-supervised Learning
Link: https://arxiv.org/abs/2006.07733
Implementation: https://github.com/deepmind/deepmind-research/tree/master/byol
"""
def __init__(
self,
backbone: nn.Module,
feature_size: int,
projection_dim: int = 256,
hidden_dim: int = 4096,
moving_average_decay: float = 0.99,
**kwargs
):
super().__init__()
self.backbone = backbone
self.feature_size = feature_size
self.projection_dim = projection_dim
self.hidden_dim = hidden_dim
self.moving_average_decay = moving_average_decay
self.projection_head = BYOLProjectionHead(
feature_size, hidden_dim, projection_dim
)
self.prediction_head = BYOLPredictionHead(
projection_dim, hidden_dim, projection_dim
)
self.online_encoder = self.encoder = nn.Sequential(
self.backbone, self.projection_head
)
self.target_encoder = copy.deepcopy(
self.online_encoder
) # target must be a deepcopy of online, since we will use the backbone trained by online
self._init_target_encoder()
def _init_target_encoder(self):
for param_o, param_t in zip(
self.online_encoder.parameters(), self.target_encoder.parameters()
):
param_t.data.copy_(param_o.data)
param_t.requires_grad = False
@torch.no_grad()
def _momentum_update_target_encoder(self):
for param_o, param_t in zip(
self.online_encoder.parameters(), self.target_encoder.parameters()
):
param_t.data = self.moving_average_decay * param_t.data + (1.0 - self.moving_average_decay) * param_o.data
def forward(self, x0: torch.Tensor, x1: torch.Tensor):
z0_o, z1_o = self.online_encoder(x0), self.online_encoder(x1)
p0_o, p1_o = self.prediction_head(z0_o), self.prediction_head(z1_o)
with torch.no_grad():
self._momentum_update_target_encoder()
z0_t, z1_t = self.target_encoder(x0), self.target_encoder(x1)
        return (p0_o, z0_t), (p1_o, z1_t)
| AK_SSL | /AK_SSL-0.0.1-py3-none-any.whl/AK_SSL/models/byol.py | byol.py |
import copy
import torch
import torch.nn as nn
from AK_SSL.models.modules.heads import SimCLRProjectionHead, BYOLPredictionHead
class MoCov3(nn.Module):
"""
MoCo v3: Momentum Contrast v3
Link: https://arxiv.org/abs/2104.02057
Implementation: https://github.com/facebookresearch/moco-v3
"""
def __init__(
self,
backbone: nn.Module,
feature_size: int,
projection_dim: int = 256,
hidden_dim: int = 4096,
moving_average_decay: float = 1.0,
**kwargs
):
"""
Args:
backbone: Backbone network.
feature_size: Number of features.
projection_dim: Dimension of projection head output.
hidden_dim: Dimension of hidden layer in projection head.
moving_average_decay: Decay factor for the moving average of the target encoder.
"""
super().__init__()
self.backbone = backbone
self.feature_size = feature_size
self.projection_dim = projection_dim
self.hidden_dim = hidden_dim
self.moving_average_decay = moving_average_decay
self.projection_head = SimCLRProjectionHead(
input_dim=self.feature_size,
hidden_dim=self.hidden_dim,
output_dim=self.projection_dim,
)
self.encoder_q = self.encoder = nn.Sequential(
self.backbone, self.projection_head
)
self.prediction_head = BYOLPredictionHead(
input_dim=self.projection_dim,
hidden_dim=self.hidden_dim,
output_dim=self.projection_dim,
)
self.encoder_k = copy.deepcopy(self.encoder_q)
self._init_encoder_k()
@torch.no_grad()
def _init_encoder_k(self):
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data.copy_(param_q.data)
param_k.requires_grad = False
@torch.no_grad()
def _update_momentum_encoder(self):
for param_b, param_m in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_m.data = param_m.data * self.moving_average_decay + param_b.data * (
1.0 - self.moving_average_decay
)
def forward(self, x0: torch.Tensor, x1: torch.Tensor):
q0 = self.prediction_head(self.encoder_q(x0))
q1 = self.prediction_head(self.encoder_q(x1))
with torch.no_grad():
self._update_momentum_encoder()
k0 = self.encoder_k(x0)
k1 = self.encoder_k(x1)
return (q0, q1), (k0, k1)
class MoCoV2(nn.Module):
"""
MoCo v2: Momentum Contrast v2
Link: https://arxiv.org/abs/2003.04297
Implementation: https://github.com/facebookresearch/moco
"""
def __init__(
self,
backbone: nn.Module,
feature_size: int,
projection_dim: int = 128,
temperature: float = 0.07,
K: int = 65536,
m: float = 0.999,
):
"""
Args:
backbone: Backbone network.
feature_size: Number of features.
projection_dim: Dimension of projection head output.
K: Number of negative keys.
m: Momentum for updating the key encoder.
"""
super().__init__()
self.backbone = backbone
self.projection_dim = projection_dim
self.feature_size = feature_size
self.temperature = temperature
self.K = K
self.m = m
self.projection_head = SimCLRProjectionHead(
input_dim=self.feature_size,
hidden_dim=self.feature_size,
output_dim=self.projection_dim,
)
self.encoder_q = self.encoder = nn.Sequential(
self.backbone, self.projection_head
)
self.encoder_k = copy.deepcopy(self.encoder_q)
self._init_encoder_k()
self.register_buffer("queue", torch.randn(projection_dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _init_encoder_k(self):
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data.copy_(param_q.data)
param_k.requires_grad = False
@torch.no_grad()
def _momentum_update_encoder_k(self):
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
@torch.no_grad()
def _batch_shuffle_single_gpu(self, x):
idx_shuffle = torch.randperm(x.shape[0]).cuda()
idx_unshuffle = torch.argsort(idx_shuffle)
return x[idx_shuffle], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_single_gpu(self, x, idx_unshuffle):
return x[idx_unshuffle]
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
bz = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.K % bz == 0
self.queue[:, ptr : (ptr + bz)] = keys.t()
ptr = (ptr + bz) % self.K
self.queue_ptr[0] = ptr
def forward(self, x0: torch.Tensor, x1: torch.Tensor):
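        """
        Computes InfoNCE logits for a batch: queries come from the query encoder on x0,
        keys from the momentum (key) encoder on a shuffled x1; positives are the matched
        query/key pairs, negatives come from the memory queue. Returns (logits, labels).
        """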
q = self.encoder_q(x0)
q = nn.functional.normalize(q, dim=1)
with torch.no_grad():
self._momentum_update_encoder_k()
x1, idx_unshuffle = self._batch_shuffle_single_gpu(x1)
k = self.encoder_k(x1)
k = nn.functional.normalize(k, dim=1)
k = self._batch_unshuffle_single_gpu(k, idx_unshuffle)
l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
l_neg = torch.einsum("nc,ck->nk", [q, self.queue.clone().detach()])
logits = torch.cat([l_pos, l_neg], dim=1)
logits /= self.temperature
labels = torch.zeros(logits.shape[0], dtype=torch.long).to(q.device)
self._dequeue_and_enqueue(k)
        return logits, labels
| AK-SSL | /AK_SSL-0.0.1-py3-none-any.whl/AK_SSL/models/moco.py | moco.py |
import torch
import torch.nn as nn
from typing import List, Optional, Tuple
class ProjectionHead(nn.Module):
"""
Description:
Base class for all projection and prediction heads.
Args:
blocks:
List of tuples, each denoting one block of the projection head MLP.
Each tuple reads (in_features, out_features, batch_norm_layer,
non_linearity_layer).
"""
def __init__(
self, blocks: List[Tuple[int, int, Optional[nn.Module], Optional[nn.Module]]]
):
super().__init__()
layers = []
for input_dim, output_dim, batch_norm, non_linearity in blocks:
use_bias = not bool(batch_norm)
layers.append(nn.Linear(input_dim, output_dim, bias=use_bias))
if batch_norm:
layers.append(batch_norm)
if non_linearity:
layers.append(non_linearity)
self.layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor):
return self.layers(x)
class SimCLRProjectionHead(ProjectionHead):
"""
Description:
Initialize a new SimCLRProjectionHead instance.
Args:
input_dim: Number of input dimensions.
hidden_dim: Number of hidden dimensions.
output_dim: Number of output dimensions.
num_layers: Number of hidden layers (2 for v1, 3+ for v2).
batch_norm: Whether or not to use batch norms.
"""
def __init__(
self,
input_dim: int = 2048,
hidden_dim: int = 2048,
output_dim: int = 128,
num_layers: int = 2,
batch_norm: bool = True,
**kwargs,
):
layers: List[Tuple[int, int, Optional[nn.Module], Optional[nn.Module]]] = []
layers.append(
(
input_dim,
hidden_dim,
nn.BatchNorm1d(hidden_dim) if batch_norm else None,
nn.ReLU(inplace=True),
)
)
for _ in range(2, num_layers):
layers.append(
(
hidden_dim,
hidden_dim,
nn.BatchNorm1d(hidden_dim) if batch_norm else None,
nn.ReLU(inplace=True),
)
)
layers.append(
(
hidden_dim,
output_dim,
nn.BatchNorm1d(output_dim) if batch_norm else None,
None,
)
)
super().__init__(layers)
class BarlowTwinsProjectionHead(ProjectionHead):
"""
Description:
Projection head used for Barlow Twins.
Args:
input_dim: Number of input dimensions.
hidden_dim: Number of hidden dimensions.
output_dim: Number of output dimensions.
"""
def __init__(
self, input_dim: int = 2048, hidden_dim: int = 8192, output_dim: int = 8192
):
super(BarlowTwinsProjectionHead, self).__init__(
[
(
input_dim,
hidden_dim,
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True),
),
(
hidden_dim,
hidden_dim,
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True),
),
(hidden_dim, output_dim, None, None),
]
)
class BYOLProjectionHead(ProjectionHead):
"""
Projection head used for BYOL.
"""
def __init__(
self, input_dim: int = 2048, hidden_dim: int = 4096, output_dim: int = 256
):
super(BYOLProjectionHead, self).__init__(
[
(input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
(hidden_dim, output_dim, None, None),
]
)
class BYOLPredictionHead(ProjectionHead):
"""
Prediction head used for BYOL.
"""
def __init__(
self, input_dim: int = 256, hidden_dim: int = 4096, output_dim: int = 256
):
super(BYOLPredictionHead, self).__init__(
[
(input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
(hidden_dim, output_dim, None, None),
]
)
class SimSiamProjectionHead(ProjectionHead):
"""
Projection head used for SimSiam.
"""
def __init__(
self, input_dim: int = 2048, hidden_dim: int = 2048, output_dim: int = 2048
):
super(SimSiamProjectionHead, self).__init__(
[
(input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
(hidden_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
(
hidden_dim,
output_dim,
nn.BatchNorm1d(output_dim, affine=False),
None,
),
]
)
class SimSiamPredictionHead(ProjectionHead):
"""
Prediction head used for SimSiam.
"""
def __init__(
self, input_dim: int = 2048, hidden_dim: int = 512, output_dim: int = 2048
):
super(SimSiamPredictionHead, self).__init__(
[
(input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
(hidden_dim, output_dim, None, None),
]
)
class SwAVProjectionHead(ProjectionHead):
"""
Projection head used for SwAV.
"""
def __init__(
self, input_dim: int = 2048, hidden_dim: int = 2048, output_dim: int = 128
):
super(SwAVProjectionHead, self).__init__(
[
(input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
(hidden_dim, output_dim, None, None),
]
)
class DINOProjectionHead(nn.Module):
"""
Projection Head for DINO
"""
def __init__(
self,
input_dim: int,
output_dim: int = 256,
use_bn: bool = False,
norm_last_layer: bool = True,
num_layers: int = 3,
hidden_dim: int = 2048,
bottleneck_dim: int = 256,
):
super().__init__()
num_layers = max(num_layers, 1)
if num_layers == 1:
self.mlp = nn.Linear(input_dim, bottleneck_dim)
else:
layers = [nn.Linear(input_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(num_layers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(
nn.Linear(bottleneck_dim, output_dim, bias=False)
)
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
torch.nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
        return x
| AK_SSL | /AK_SSL-0.0.1-py3-none-any.whl/AK_SSL/models/heads.py | heads.py |
import time
import datetime
import types
import urllib
import urllib2
import json
import MySQLdb
mydirs={}
values={}
class BusGpsParser(object):
def __init__(self,db='no',cur='no'):
self.db=db
self.cur=cur
self.dirs={}
"""It's load the source-data and the source-keys
and the line_dict for get the line_id"""
def load(self,data,keys,line_dict):
self.data=data
self.keys=keys
self.line_id_dict=line_dict
"""It's call by self!"""
def subjoin_name(self,pre):
today=datetime.date.today()
subfix=today.strftime("%Y%m%d");
table_name=pre+subfix
return table_name
def parser(self,sp_flag):
temp_dirs={}
lists=self.data.split(sp_flag)
for i in range(0,len(self.keys)):
temp_dirs[self.keys[i]]=lists[i]
self.dirs=temp_dirs
"""The keys argument is very important
    It decides what data you want to save
or update to your database"""
def recondition(self,keys):
self.line_no_key=self.dirs['line_no']
self.direction=self.dirs['direction']
if self.dirs.has_key('gps_date'):
gps_time=self.datetimeTranslate('gps_date','gps_time')
send_time=self.datetimeTranslate('send_date','send_time')
self.dirs['gps_datetime']=gps_time
self.dirs['send_datetime']=send_time
elif self.dirs.has_key('stn_date'):
stn_dt=self.datetimeTranslate('stn_date','stn_time')
self.dirs['stn_dt']=stn_dt
line_id=self.get_line_id()
line_id=str(line_id)
self.dirs['line_id']=line_id
self.keys=keys
"""It's call by self"""
def calculator_line_id(self):
direction=self.direction
line_no=self.line_no_key
if direction == '0':
a_line_no =int (line_no)
a_line_id=a_line_no *10 + 0
return str(a_line_id)
elif direction == '1':
b_line_no =int (line_no)
b_line_id= b_line_no *10 + 1
return str(b_line_id)
else :
print 'Error direction'
"""It's call by self"""
def check_dictionary(self):
line_no_key=self.line_no_key
line_id_dict=self.line_id_dict
if line_id_dict.get(line_no_key) == None:
print 'No key name as: %s' % line_no_key
else:
line_no=(line_id_dict.get(line_no_key))
return str(line_no)
"""It's call by self"""
def get_line_id(self):
line_no_key=self.line_no_key
direction=self.direction
if line_no_key.isdigit():
line_id= self.calculator_line_id()
return str(line_id)
else:
line_no=self.check_dictionary()
self.line_no_key=line_no
line_id= self.calculator_line_id()
return str(line_id)
def datetimeTranslate(self,arg_date,arg_time):
_gps_datetime=self.dirs[arg_date]+' '+self.dirs[arg_time]
_micro_second=time.mktime(time.strptime(_gps_datetime,'%Y-%m-%d %H:%M:%S'))
_datetime="%d" % _micro_second
return _datetime
"""All for sql call ,make it fit to sql syntax"""
def wrap(self,ruler):
sql_data=''
index=2
for var in ruler:
if isinstance(var,str):
sql_data=str(sql_data)+','+'\''+str(self.dirs[self.keys[index]])+'\''
index+=1
else:
for i in range(0,var):
sql_data=str(sql_data)+','+str(self.dirs[self.keys[index]])
index+=1
return '('+sql_data[1:]+')'
def items(self):
sum_items=''
for item in self.keys[2:]:
sum_items=sum_items+','+item
return '('+sum_items[1:]+')'
"""just sql cmd part"""
def equal_value(self):
sum_items=''
for item in self.keys[2:]:
sum_items=sum_items+','+item+'='+'VALUES('+item+')'
return sum_items[1:]
"""It's for update to new table everyday!"""
def tableName(self,arg):
if arg == 'gps':
gps_tn=self.subjoin_name('gps_')
return gps_tn
elif arg == 'stn':
stn_tn=self.subjoin_name('stn_')
return stn_tn
else:
print """Error argu: except:'stn' or 'gps' !!"""
"""It's for your dynamic sql database table"""
def save(self,sql_cmd):
try:
self.cur.execute(sql_cmd)
self.db.commit()
except MySQLdb.Error,e:
print "MySQL Dynamic tables error %d:%s" % (
e.args[0],e.args[1])
"""It's for your static sql database table"""
def update(self,sql_cmd):
try:
self.cur.execute(sql_cmd)
self.db.commit()
except MySQLdb.Error,e:
print "MySQL Static tables erro %d:%s" % (
e.args[0],e.args[1])
"""It's for your check data fast and message push"""
def post(self,url):
values['bus_no']=self.dirs['bus_no']
values['line_id']=self.dirs['line_id']
values['station_no']=self.dirs['station_no']
values['direction']=self.dirs['direction']
values['stn_dt']=self.dirs['stn_dt']
values['flag']=self.dirs['flag']
try:
jdata=json.dumps(values)
req=urllib2.Request(url,jdata)
response = urllib2.urlopen(req)
except Exception,e:
print 'Error url'
pass
def run_all(self):
        pass
| AKBusGpsParser | /AKBusGpsParser-1.0.0.tar.gz/AKBusGpsParser-1.0.0/AKBusGpsParser.py | AKBusGpsParser.py |
import numpy as np
from os import path,system
from AKCK_LFF.fitted_LFF import g_plus_new, g_plus_dlda
from AKCK_LFF.PW92 import ec_pw92
from AKCK_LFF.g_corradini import g_corradini
from AKCK_LFF.ra_lff import g_plus_ra
from AKCK_LFF.rMCP07 import g_rMCP07, GKI_im_freq
from AKCK_LFF.gauss_quad import GK_global_adap, GK_GA_PINF, qdir, GLQ
from AKCK_LFF.asymptotics import pade_g_plus
pi = np.pi
kf_to_rs = (9.*pi/4.)**(1./3.)
eH_to_eV = 27.211386245988
gopts = {'prec': 1.e-6, 'min_recur': 4, 'err_meas': 'quadpack', 'npts': 3}
ecdir = './ec_data/'
if not path.isdir(ecdir):
system('mkdir -p ' + ecdir)
def freqcut(rs):
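    """Empirical cutoff on the reduced imaginary-frequency variable as a function of rs,
    used as an integration limit/breakpoint for the frequency quadrature."""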
c = [1.227277, 5.991171, 0.283892, 0.379981]
bkpt = 40.
if rs <= bkpt:
ff = c[0] + c[1]*rs**c[2]
else:
ff = c[0] + c[1]*bkpt**c[2] + (rs - bkpt)**c[3]
return ff
def qcut(rs):
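    """Empirical cutoff on the reduced wavevector x = q/kF as a function of rs,
    used as an integration limit/breakpoint for the wavevector quadrature."""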
c = [3.928319, 0.540168, 0.042225, 0.001810, 2.501585]
bkpt1 = 5.
bkpt2 = 60.
if rs <= bkpt1:
f = c[0] + c[1]*rs
elif rs <= bkpt2:
f = c[0] + bkpt1*c[1] + c[2]*(rs - bkpt1) + c[3]*(rs - bkpt1)**2
else:
f = c[0] + bkpt1*c[1] + (bkpt2 - bkpt1)*(c[2] + (bkpt2 - bkpt1)*c[3]) \
+ c[4]*(rs - bkpt2)
return f
def gen_grid(rs,Nq=100,Nlam=100,Nw=100):
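    """Builds a tensor-product Gauss-Legendre (GLQ) grid over the reduced wavevector
    (finite part plus a 1/t-mapped tail), the coupling constant lambda in (0,1), and
    the reduced imaginary frequency (finite part plus a log-mapped tail).
    Returns an array with columns (x, lambda, u, combined weight)."""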
Nq += Nq%2
Nw += Nw%2
qfl = qdir + '/GLQ_{:}.csv'.format(Nq)
if path.isfile(qfl):
tq, twgq = np.transpose(np.genfromtxt(qfl,delimiter=',',skip_header=1))
else:
tq, twgq = GLQ(Nq)
ql = np.zeros(Nq + Nq//2)
qwg = np.zeros(Nq + Nq//2)
qc = qcut(rs)
ql[:Nq] = 0.5*qc*(tq + 1.)
qwg[:Nq] = 0.5*qc*twgq
qfl = qdir + '/GLQ_{:}.csv'.format(Nq//2)
if path.isfile(qfl):
tq, twgq = np.transpose(np.genfromtxt(qfl,delimiter=',',skip_header=1))
else:
tq, twgq = GLQ(Nq//2)
tq = 0.5*(tq + 1.)/qc
twgq *= 0.5/qc
ql[Nq:] = 1./tq
qwg[Nq:] = twgq/tq**2
lfl = qdir + '/GLQ_{:}.csv'.format(Nlam)
if path.isfile(lfl):
tlam, twgl = np.transpose(np.genfromtxt(lfl,delimiter=',',skip_header=1))
else:
tlam, twgl = GLQ(Nlam)
lam = 0.5*(1. + tlam)
lamwg = 0.5*twgl
wfl = qdir + '/GLQ_{:}.csv'.format(Nw)
if path.isfile(wfl):
tw, twgw = np.transpose(np.genfromtxt(wfl,delimiter=',',skip_header=1))
else:
tw, twgw = GLQ(Nw)
wl = np.zeros(Nw + Nw//2)
wwg = np.zeros(Nw + Nw//2)
uc = freqcut(rs)
wl[:Nw] = 0.5*uc*(tw + 1.)
wwg[:Nw] = 0.5*uc*twgw
wfl = qdir + '/GLQ_{:}.csv'.format(Nw//2)
if path.isfile(wfl):
tw, twgw = np.transpose(np.genfromtxt(wfl,delimiter=',',skip_header=1))
else:
tw, twgw = GLQ(Nw//2)
tw = 0.5*(tw + 1.)
twgw *= 0.5
wl[Nw:] = uc - np.log(1. - tw)
wwg[Nw:] = twgw/(1. - tw)
grid = np.zeros((ql.shape[0]*lam.shape[0]*wl.shape[0],4))
ipt = 0
for iq in range(ql.shape[0]):
for ilam in range(lam.shape[0]):
for iw in range(wl.shape[0]):
grid[ipt,0] = ql[iq]
grid[ipt,1] = lam[ilam]
grid[ipt,2] = wl[iw]
grid[ipt,3] = qwg[iq]*lamwg[ilam]*wwg[iw]
ipt += 1
return grid
def ec_from_chi(rs,grid,fxc='RPA'):
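    """ACFD correlation energy evaluated on a precomputed grid from gen_grid, with the
    static local field factor G+ selected by fxc ('RPA', 'NEW' = fitted LFF,
    'COR' = Corradini et al.)."""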
kf = kf_to_rs/rs
q = kf*grid[:,0]
qscl = q/grid[:,1]
vcoul_scl = 4*pi*grid[:,1]/q**2
rs_scl = rs*grid[:,1]
if fxc == 'RPA':
gplus = 0.
elif fxc == 'NEW':
gplus = g_plus_new(qscl,rs_scl)
elif fxc == 'COR':
gplus = g_corradini(qscl,\
{'rs': rs_scl, 'rsh': rs_scl**(0.5),'kF': kf_to_rs/rs_scl, \
'n': 3./(4.*pi*rs_scl**3)})
fxch = vcoul_scl*(1. - gplus)
chi0 = chi0_im_freq(kf,grid[:,0]/2.,grid[:,2]/grid[:,0])
itgrd = chi0**2*fxch/(1. - chi0*fxch)
ec = -3.*np.dot(grid[:,3],itgrd)
return ec
def chi0_im_freq(kf,z,wt):
# z = q / (2 * kf)
# wt = Im (omega) / kf**2 * (kf / q)
log_fac = np.log( (wt**2 + (z + 1.)**2)/(wt**2 + (z - 1.)**2) )
chi0 = 1./(2.*pi**2) * ( (z**2 - wt**2 - 1.)/(4.*z) *log_fac - 1. \
+ wt*np.arctan((1. + z)/wt) + wt*np.arctan((1. - z)/wt) )
return kf*chi0
def regularize(arr,thresh):
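    """Clamps entries of arr with magnitude below thresh to +/- thresh (exact zeros are
    set to +thresh) to avoid division by very small numbers."""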
msk = np.abs(arr) < thresh
arr[msk] = thresh*np.sign(arr[msk])
arr[arr==0.] = thresh
return arr
def ec_freq_integrand(u,x,lam,rs,fxc='RPA'):
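    """ACFD integrand chi0**2*fxch/(1 - chi0*fxch) at coupling strength lam, reduced
    wavevector x = q/kF and reduced imaginary frequency u, where fxch = v_c(q)*(1 - G+)
    is the Hartree+xc kernel of the scaled system. The local field factor G+ is selected
    by fxc (static: 'RPA', 'NEW', 'COR', 'RAS', 'PADE'; dynamic: 'RAD', 'rMCP07', 'NEWD')."""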
kf = kf_to_rs/rs
q = kf*x
qscl = q/lam
vcoul_scl = 4*pi*lam/q**2
rs_scl = rs*lam
dv = {'rs': rs_scl, 'rsh': rs_scl**(0.5),'kF': kf_to_rs/rs_scl, \
'n': 3./(4.*pi*rs_scl**3)}
if fxc == 'RPA':
gplus = 0.
elif fxc == 'NEW':
gplus = g_plus_new(qscl,rs_scl)
elif fxc == 'COR':
gplus = g_corradini(qscl,dv)
elif fxc == 'RAS':
gplus = g_plus_ra(qscl,0.,rs_scl)
elif fxc == 'RAD':
gplus = g_plus_ra(qscl,u*(kf/lam)**2,rs_scl)
elif fxc == 'rMCP07':
gplus = g_rMCP07(qscl,u*(kf/lam)**2,dv)
elif fxc == 'NEWD':
gplus = g_plus_dlda(qscl,u*(kf/lam)**2,rs_scl)
elif fxc == 'PADE':
gplus = pade_g_plus(qscl,rs_scl)
fxch = vcoul_scl*(1. - gplus)
chi0 = chi0_im_freq(kf,x/2.,u/x)
itgrd = np.zeros(u.shape)
#tmsk = (u > 1.e-3) & (x > 1.e-3)
#itgrd[tmsk] = chi0[tmsk]**2*fxch/(1. - chi0[tmsk]*fxch)
iden = regularize(1. - chi0*fxch,1.e-18)
if hasattr(x,'__len__'):
msk = x > 1.e-3
itgrd[msk] = chi0[msk]**2*fxch[msk]/iden[msk]
else:
if x > 1.e-3:
itgrd = chi0**2*fxch/iden
return itgrd
def wvvctr_integrand(tx,lam,rs,fxc='RPA', uc=4., qc=2.):
topts = gopts.copy()
topts['prec'] = 1.e-7
topts['breakpoint'] = freqcut(rs)
i1l = np.zeros(tx.shape)
for j in range(tx.shape[0]):
i1l[j], msg = GK_global_adap(ec_freq_integrand, (0.,uc), opt_d=topts, \
args=(tx[j],lam,rs), kwargs={'fxc':fxc})
#i1l[j], msg = GK_GA_PINF(ec_freq_integrand,0., opt_d=topts, \
# args=(tx[j],lam,rs), kwargs={'fxc':fxc})
return i1l
def lam_integrand(lam,rs,fxc='RPA',uc = 4., qc = 2.):
topts = gopts.copy()
#topts['prec'] = 1.e-7
topts['breakpoint'] = qcut(rs)
i2l = np.zeros(lam.shape)
for j in range(lam.shape[0]):
i2l[j], msg = GK_global_adap(wvvctr_integrand, (0.,qc), opt_d=gopts, \
args=(lam[j],rs), kwargs={'fxc':fxc, 'uc': uc, 'qc': qc})
#i2l[j], msg = GK_GA_PINF(wvvctr_integrand,0., opt_d=topts, \
# args=(lam[j],rs), kwargs={'fxc':fxc, 'uc': uc, 'qc': qc})
return i2l
"""
def wvvctr_integrand(tx,lam,rs,fxc='RPA',uc = 4., qc = 2.):
i1_a, msg_a = GK_global_adap(ec_freq_integrand, (0.,.5), opt_d=gopts, \
args=(tx,lam,rs), kwargs={'fxc':fxc, 'uc': uc, 'qc': qc})
#ifrec_inv = lambda v : ec_freq_integrand(1./v,tx,lam,rs,fxc=fxc)/v**2
i1_b, msg_b = GK_global_adap(ec_freq_integrand, (.5,1.), opt_d=gopts, \
args=(tx,lam,rs), kwargs={'fxc':fxc, 'uc': uc, 'qc': qc})
return i1_a + i1_b
def lam_integrand(lam,rs,fxc='RPA',uc = 6., qc = 10.):
i2_a, msg_a = GK_global_adap(wvvctr_integrand, (0.,.5), opt_d=gopts, \
args=(lam,rs), kwargs={'fxc':fxc, 'uc': uc, 'qc': qc})
#iwv_inv = lambda p : wvvctr_integrand(1./p,lam,rs,fxc=fxc)/p**2
#i2_b, msg_b = GK_global_adap(iwv_inv,(0.,1./qc),opt_d=gopts)
i2_b, msg_b = GK_global_adap(wvvctr_integrand, (.5,1.), opt_d=gopts, \
args=(lam,rs), kwargs={'fxc':fxc, 'uc': uc, 'qc': qc})
return i2_a + i2_b
#"""
def get_ec(rs,fxc='RPA'):
tgrid = gen_grid(rs)
ecden = ec_freq_integrand(tgrid[:,2],tgrid[:,0],tgrid[:,1],rs,fxc=fxc)
return -3.*np.dot(tgrid[:,3],ecden)
def get_ec_GK(rs,fxc='RPA',uc = 1., qc = 2.5):
topts = gopts.copy()
topts['prec'] = 1.e-7
i3,msg = GK_global_adap(lam_integrand,(0.,1.),opt_d=topts, \
args =(rs,), kwargs={'fxc':fxc, 'uc': uc, 'qc': qc})
if msg['code'] < 1 or np.isnan(i3) or np.isinf(i3):
td = gopts.copy()
for tprec in [1.e-7,1.e-6,1.e-5]:
td['prec'] = tprec
i3,msg = GK_global_adap(lam_integrand,(0.,1.),opt_d=td, \
args =(rs,), kwargs={'fxc':fxc, 'uc': uc, 'qc': qc})
if msg['code'] == 1 and not np.isnan(i3) and not np.isinf(i3):
break
return -3.*i3
def ec_rpa_unp(rs):
# J. P. Perdew and Y. Wang, PRB 45, 13244 (1992).
# doi: 10.1103/PhysRevB.45.13244
def g(v,rs):
q0 = -2.0*v[0]*(1.0 + v[1]*rs)
q1 = 2.0*v[0]*(v[2]*rs**(0.5) + v[3]*rs + v[4]*rs**(1.5) + v[5]*rs**(1.75))
return q0*np.log(1.0 + 1.0/q1)
return g([0.031091,0.082477,5.1486,1.6483,0.23647,0.20614],rs)
def gen_dat_files(gpl):
rs_min = 1.e-1
rs_max = 120.
Nrs = 100
rsl = np.exp(np.linspace(np.log(rs_min),np.log(rs_max),Nrs))
for gpstr in gpl:
ec = np.zeros(Nrs)
for irs, rs in enumerate(rsl):
ec[irs] = get_ec(rs,fxc=gpstr)
#print(rs,ec[irs])
np.savetxt(ecdir + '/eps_c_{:}.csv'.format(gpstr), \
np.transpose((rsl,ec)), delimiter=',', header='rs, eps_c')
return
def corr_plots(gpl = ['NEW','COR','RAS','RAD','rMCP07']):
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from AKCK_LFF.global_plot_pars import colors, lsls
label_d = {
        'RPA': 'RPA', 'NEW': 'This work', 'COR': r'Corradini $\mathrm{\it et \, al.}$',
'RAS': 'RA, static', 'RAD': 'RA', 'rMCP07': 'rMCP07',
'PADE': 'Padé', 'NEWD': 'NEWD'}
lim_d = {'NEW': 33. , 'RAS': 38., 'RAD': 38.}
missing_dat = []
for gp in gpl:
if not path.isfile(ecdir + '/eps_c_{:}.csv'.format(gp)):
missing_dat.append(gp)
gen_dat_files(missing_dat)
fig, ax = plt.subplots(figsize=(6,4))
xbds = [1e20,-1e20]
ybds = [1e20,-1e20]
for gp in gpl:
tdat = np.genfromtxt(ecdir + '/eps_c_{:}.csv'.format(gp), \
delimiter=',', skip_header=1)
tdat2 = []
for i in range(tdat.shape[0]):
if np.isinf(tdat[i,1]) or np.isnan(tdat[i,1]):
continue
tdat2.append([tdat[i,0],tdat[i,1]])
tdat = np.array(tdat2).copy()
pw92, _, _, _ = ec_pw92(tdat[:,0],0.)
if gp in lim_d:
msk = tdat[:,0] <= lim_d[gp]
else:
msk = np.ones(tdat.shape[0],dtype=bool)
#tfun = (tdat[msk,1]-pw92[msk])*eH_to_eV
tfun = 100*(1. - tdat[msk,1]/pw92[msk])
ax.plot(tdat[msk,0],tfun,color=colors[gp],label=label_d[gp],\
linestyle=lsls[gp])
xbds = [min(xbds[0],tdat[:,0].min()),max(xbds[1],tdat[:,0].max() )]
ybds = [min(ybds[0],tfun.min()),max(ybds[1],tfun.max() )]
#rsl = np.linspace(xbds[0],xbds[1],2000)
#pw92, _, _, _ = ec_pw92(rsl,0.)
#ax.plot(rsl,pw92,color='k')
#ax.plot(rsl,ec_rpa_unp(rsl),color='tab:green',linestyle=':')
#ax.set_ylim(1.02*ybds[0],1.02*ybds[1])
ax.set_ylim(-10.,20.)
ax.set_xlim(*xbds)
ax.hlines(0.,*xbds,color='k',linestyle=':',linewidth=1)
ax.set_xscale('log')
ax.set_xlabel('$r_\\mathrm{s}$ (bohr)',fontsize=14)
#ax.set_ylabel(r'$\varepsilon_\mathrm{c}(r_\mathrm{s},0) - \varepsilon_\mathrm{c}^\mathrm{PW92}(r_\mathrm{s},0)$ (eV/elec.)',fontsize=12)
ax.set_ylabel(r'$\varepsilon_\mathrm{c}(r_\mathrm{s},0)$ PD (\%)',fontsize=12)
ax.yaxis.set_minor_locator(MultipleLocator(1.))
ax.yaxis.set_major_locator(MultipleLocator(5.))
ax.legend(fontsize=12,frameon=False,ncol=1)
#plt.show();exit()
plt.savefig('./ec_data/eps_c_err.pdf',dpi=600,bbox_inches='tight')
return
def RPA_sanity_check():
rsl = [0.1, 0.5, 1.,2.,3.,4.,5.,10.,20.,40.,60.,80.,100.,120.]
tstr = r'$\rs$ & $\varepsilon\suc^\mathrm{RPA}(\rs)$ & $\varepsilon\suc^\mathrm{PW-RPA}(\rs)$ & Percent Deviation (\%) \\ \hline' + ' \n'
print(' rs eps_c' + ' '*11 + 'PE from PW92 (%)')
for rs in rsl:
ec_rpa = get_ec(rs,fxc='RPA')
ec_pw_rpa = ec_rpa_unp(rs)
tpe = 100*(1. - ec_rpa/ec_pw_rpa)
print('{:} {:.6f} {:.2f}'.format(rs,ec_rpa,tpe))
tstr += '{:} & {:.6f} & {:.6f} & {:.2f} \\\\ \n'.format(rs,ec_rpa,ec_pw_rpa,tpe )
with open('./ec_data/rpa_sanity.tex','w+') as tfl:
tfl.write(tstr)
return
if __name__ == "__main__":
#RPA_sanity_check() ; exit()
corr_plots()
exit()
#"""
for rs in [1.,2.,3.,4.,5.,10.,100.]:
ec_rpa = ec_from_chi(rs,pts)
print(rs,ec_rpa,100*(1. - ec_rpa/ec_rpa_unp(rs)) )
exit()
w = [1.e-6,0.5,1.,5.]
ql = np.linspace(1.e-6,5.,5000)
rs = 1.
kf = kf_to_rs/rs
for aw in w:
plt.plot(ql,chi0_im_freq(kf,ql,aw))
plt.show() | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/corr.py | corr.py |
import numpy as np
from AKCK_LFF.PW92 import g0_unp_pw92_pade, ec_pw92
pi = np.pi
alpha = (4./(9*pi))**(1./3.)
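# get_lambdas returns the small-q (_0) and large-q (_inf) limits of the
# symmetric (s), occupation (n), and antisymmetric (a) pieces of the
# Richardson-Ashcroft LFF, built from PW92 correlation-energy derivatives
# (converted to Rydberg) and the on-top pair-distribution value g0.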
def get_lambdas(rs):
ec, d_ec_drs, d_ec_drs2, d_ec_dz2 = ec_pw92(rs,0.0)
# conversion to Rydberg
ec *= 2.
d_ec_drs *= 2.
d_ec_drs2 *= 2.
d_ec_dz2 *= 2.
# Eq. 40, corrected by Lein, Gross, and Perdew
lam_s_inf = 3./5. - 2*pi*alpha*rs/5.*(rs*d_ec_drs + 2*ec)
# Eq. 44
lam_pade = -0.11*rs/(1 + 0.33*rs)
# Eq. 39, corrected by Lein, Gross, and Perdew
sfac = 1. - 3*(2.*pi/3.)**(2./3.)*rs*d_ec_dz2/2.
lam_n_0 = lam_pade*sfac
lam_a_0 = sfac - lam_n_0
# Eq. 38
lam_s_0 = 1. + pi/3*alpha*rs**2*(d_ec_drs - rs*d_ec_drs2/2.) - lam_n_0
lam_n_inf = 3*pi*alpha*rs*(ec + rs*d_ec_drs)
# just below Eq. 39
g0 = g0_unp_pw92_pade(rs)
lam_a_inf = (2*g0 - 1.)/3.
return lam_s_0, lam_s_inf, lam_n_0, lam_n_inf, lam_a_0, lam_a_inf
def lff_ra_symm(q,w,rs):
"""
        NB: q = (wavevector in a.u.)/(2*kf), w = (frequency in a.u.)/(2*kf**2).
        There are at least three alphas in the RA paper: alpha is determined
        from exact constraints and enters the lambdas (lam_), while alp is a
        parameter of the parameterization and enters a, b, and c.
"""
alp = 0.9
g0 = g0_unp_pw92_pade(rs)
omg0 = 1. - g0
lam_s_0, lam_s_inf, _, _, _, _ = get_lambdas(rs)
"""
p. 57 of RA work: per Phys Rev style (https://journals.aps.org/prl/authors):
"Note that the solidus (/) in fractions, for example 1/2a, means 1/(2a) and not (1/2)a."
which to me implies that what is meant is 9/(16*[1 - g(0)]) and not 9[1-g(0)]/16.
"""
gam_s = 9*lam_s_inf/(16*omg0) + (4.*alp - 3.)/(4.*alp)
# Eq. 56
a_s = lam_s_inf + (lam_s_0 - lam_s_inf)/(1. + (gam_s*w)**2)
# Eq. 55
c_s = 3*lam_s_inf/(4*omg0) - (4/3 - 1./alp + \
3*lam_s_inf/(4*omg0))/(1 + gam_s*w)
# Eq. 54
b_s = a_s/( ( (3*a_s - 2*c_s*omg0)*(1. + w) - 8*omg0/3. )*(1. + w)**3 )
q2 = q**2
q6 = q2**3
# Eq. 53
g_s = q2*(a_s + 2*b_s*omg0*q6/3.)/(1. + q2*(c_s + b_s*q6))
return g_s
def lff_ra_occ(q,w,rs):
"""
NB: q = (wavevector in a.u.)/(2*kf), w = (frequency in a.u.)/(2*kf**2)
"""
gam_n = 0.68
gnw = gam_n*w
gnw2 = gnw*gnw
opgnw = 1. + gnw
_, _, lam_n_0, lam_n_inf, _, _ = get_lambdas(rs)
"""
        Eq. 65. Note that the printed version of a_n has "gamma" instead of
        "gamma_n"; we assume this means gamma_n.
"""
a_n = lam_n_inf + (lam_n_0 - lam_n_inf)/(1. + gnw2)
"""
        Eq. 64. In this equation, "gam_n(w)" is printed twice; we assume it
        means the constant gam_n, which gives reasonable agreement with their
        figure.
"""
c_n = 3*gam_n/(1.18*opgnw) - ( (lam_n_0 + lam_n_inf/3)/(lam_n_0 + 2*lam_n_inf/3) \
+ 3*gam_n/(1.18*opgnw))/(1. + gnw2)
# Eq. 63
bt = a_n + lam_n_inf*(1. + 2/3*c_n*opgnw)
b_n = -3/(2*lam_n_inf*opgnw**2)*( bt + (bt**2 + 4/3*a_n*lam_n_inf)**(0.5) )
q2 = q**2
q4 = q2*q2
# Eq. 62
g_n = q2*(a_n - lam_n_inf * b_n*q4/3.)/(1. + q2*(c_n + q2*b_n))
return g_n
def lff_ra_asymm(q,w,rs):
"""
NB: q = (wavevector in a.u.)/(2*kf), w = (frequency in a.u.)/(2*kf**2)
"""
# NB symmetric and occupation G's include factor of 1/(2*kF)**2
# because they are multiplied by a factor of 1/q**2 to get kernel
# not needed here, because G_a(q-->0) = constant(i w)
g0 = g0_unp_pw92_pade(rs)
_, _, _, _, lam_a_0, lam_a_inf = get_lambdas(rs)
# below 61
gam_a = 9.*lam_a_inf/8. + 1./4.
gaw2 = (gam_a*w)**2
# 61
beta = (4*g0 - 1.)/3. -lam_a_inf*gaw2/(3.*(1. + gaw2))
# 60
a_a = lam_a_inf + (lam_a_0 - lam_a_inf)/(1. + gaw2)
# 59, not sure if gamma_a**2 * w is a typo
# should be (gamma_a * w)**2
c_a = 3.*lam_a_inf/2. - (1./3. + 3.*lam_a_inf/2.)/(1. + gaw2)
# 58, assuming c_s is a typo, means c_a
opw = 1. + w
opw3 = opw**3
opw4 = opw3*opw
b_a = a_a/(3.*a_a*opw4 - 4*beta*opw3 - 3*c_a*beta*opw4)
q2 = q*q
q8 = q2**4
ga = lam_a_inf*gaw2/(1. + gaw2) + (a_a*q2 + b_a*beta*q8)/(1. + c_a*q2 + b_a*q8)
return ga
def g_minus_ra(q,w,rs):
"""
NB:
q = wavevector in hartree a.u., bohr
w = imaginary part of the frequency in hartree a.u.
C.F. Richardson and N.W. Ashcroft,
Phys. Rev. B 50, 8170 (1994),
and
Eq. 32 of M. Lein, E.K.U. Gross, and J.P. Perdew,
Phys. Rev. B 61, 13431 (2000)
"""
kf = (9*pi/4.)**(1./3.)/rs
z = q/(2.*kf)
u = w/(2.*kf**2)
ga = lff_ra_asymm(z,u,rs)
gn = lff_ra_occ(z,u,rs)
return ga + gn
def g_plus_ra(q,w,rs):
"""
NB:
q = wavevector in hartree a.u., bohr
w = imaginary part of the frequency in hartree a.u.
C.F. Richardson and N.W. Ashcroft,
Phys. Rev. B 50, 8170 (1994),
and
Eq. 32 of M. Lein, E.K.U. Gross, and J.P. Perdew,
Phys. Rev. B 61, 13431 (2000)
"""
kf = (9*pi/4.)**(1./3.)/rs
z = q/(2.*kf)
u = w/(2.*kf**2)
gs = lff_ra_symm(z,u,rs)
gn = lff_ra_occ(z,u,rs)
return gs + gn
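# Example usage (illustrative values): static G+ of the RA model at rs = 2
# and q = kF,
#   kf = (9.*np.pi/4.)**(1./3.)/2.
#   gp = g_plus_ra(kf, 0., 2.)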
if __name__ == "__main__":
import matplotlib.pyplot as plt
rs = 2.0
kf = (9.*pi/4.)**(1./3.)/rs
ql = np.linspace(0.0,4.0,2000)
tfac = ql**2
"""
fxcst = fxc_ra(ql/(2*kf),0.0,rs)
plt.plot(ql,fxcst)
plt.ylim(-4.5,0)
plt.xlim(0,9)
plt.show()
exit()
"""
lsls = ['-','--']
for iw,w in enumerate([2.]):#[0.5, 2]):
gs = lff_ra_symm(ql,w/(2.*kf**2),rs)
gn = lff_ra_occ(ql,w/(2.*kf**2),rs)
ga = lff_ra_asymm(ql,w/(2.*kf**2),rs)
#plt.plot(ql,(gs+gn)*tfac,label="$G_s(q,iw), w= {:}$".format(w),color='darkblue', \
# linestyle=lsls[iw])
plt.plot(ql,gs,label="$G_s(q,iw), w= {:}$".format(w),color='darkblue', \
linestyle=lsls[iw])
plt.plot(ql,gn,label="$G_n(q,iw), w= {:}$".format(w),color='darkorange', \
linestyle=lsls[iw])
plt.plot(ql,ga,label="$G_a(q,iw), w= {:}$".format(w),color='darkgreen', \
linestyle=lsls[iw])
plt.legend(title='$r_s={:}$'.format(rs))
plt.xlabel('$q/(2k_F)$')
plt.ylabel('$G(q,iw)$')
plt.ylim([-1.0,4.0])
plt.show()
exit()
rsl = [0.01, 0.1, 0.5, 1,2,5,10,20,100]
for rs in rsl:
        gxc = g0_unp_pw92_pade(rs)
        # NB: g0_unp_yasuhara is not imported in this module; this unreachable
        # check requires importing that parameterization from elsewhere.
        print('{:}, {:.3f}, {:.3f}'.format(rs,gxc,g0_unp_yasuhara(rs)))
import numpy as np
pi = np.pi
kf_to_rs = (9.*pi/4.)**(1./3.)
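# spinf is the spin-scaling factor [(1+z)**pow + (1-z)**pow]/2 (with 1 +/- z
# clamped to [0,2]); ts and epsx use it for the noninteracting kinetic and
# exchange energies per electron, which ke_ex subtracts from the QMC total
# energies below to isolate the correlation energy.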
def spinf(z,pow):
opz = np.minimum(2,np.maximum(0.0,1+z))
omz = np.minimum(2,np.maximum(0.0,1-z))
return (opz**pow + omz**pow)/2.0
def ts(rs,z):
# kinetic energy per electron
ts0 = 3./10.*(kf_to_rs/rs)**2
ds = spinf(z,5./3.)
return ts0*ds
def epsx(rs,z):
# exchange energy per electron
ex0 = -3./(4.*pi)*kf_to_rs/rs
dx = spinf(z,4./3.)
return ex0*dx
def ke_ex(rs,z):
return ts(rs,z) + epsx(rs,z)
def get_ck_chi_enh():
"""
Table I of C.A. Kukkonen and K. Chen,
Phys. Rev. B 104, 195142 (2021),
doi: 10.1103/PhysRevB.104.195142
"""
rsl = np.array([1.,2.,3.,4.,5.])
chi_chi0 = np.array([1.152,1.296,1.438,1.576,1.683])
ucrt = np.array([2.e-3,6.e-3,9.e-3,9.e-3,1.5e-2])
#kfl = kf_to_rs/rsl
#ac = kfl**2/3.*(1./chi_chi0 - 1. + 1./(pi*kfl))
return rsl, chi_chi0, ucrt
#return rsl, ac, kfl**2/3./ucrt#
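# get_CA_ec returns QMC total energies (tabulated in Rydberg) for the
# unpolarized (key 0) and fully polarized (key 1) fluid, converted to Hartree
# and reduced to correlation energies per electron by subtracting ke_ex.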
def get_CA_ec():
etot_d = {
0 : np.transpose((
np.array([1.0,2.0,5.0,10.0,20.0,50.0,100.0]),
np.array([1.174,0.0041,-0.1512,-0.10675,-0.06329,-0.02884,-0.015321]),
np.array([1.e-3,4.e-4,1.e-4,5.e-5,3.e-5,1.e-5,5.e-6])
)),
1 : np.transpose((
np.array([2.0,5.0,10.0,20.0,50.0,100.0]),
np.array([0.2517,-0.1214,-0.1013,-0.06251,-0.02878,-0.015340]),
np.array([6.e-4,2.e-4,1.e-4,3.e-5,2.e-5,5.e-6])
))
}
npts = 0
for az in etot_d:
etot_d[az][:,1] /= 2. # conversion from Rydberg
etot_d[az][:,2] /= 2.
etot_d[az][:,1] -= ke_ex(etot_d[az][:,0],1.*az)
npts += etot_d[az].shape[0]
return etot_d, npts
def get_HM_QMC_dat():
"""
Tables II-IV of Supplemental Material of
M. Holzmann and S. Moroni, Phys. Rev. Lett. 124, 206404 (2020).
doi: 10.1103/PhysRevLett.124.206404
These are zero-variance extrapolated DMC values without SJ
"""
etot_d = {
70 : np.transpose(( \
np.array([0., 0.42, 0.61, 0.79, 1.0]), \
np.array([-21.37090, -21.36832, -21.36563,-21.36097, -21.34945]), \
np.array([2.6e-4, 1.9e-4, 1.3e-4, 9.6e-4, 7.7e-4]) \
)),
100: np.transpose(( \
np.array([0., 0.18, 0.42, 0.61, 0.79, 1.0]), \
np.array([-15.38914, -15.38876, -15.38857, -15.38811, -15.38679, -15.38325]),\
np.array([1.7e-4, 1.5e-4, 9.e-5, 2.1e-4, 9e-5, 3.e-5])
)),
120: np.transpose((\
np.array([0., 0.18, 0.42, 0.61, 0.79, 1.0]), \
np.array([-12.98786, -12.98758, -12.98760, -12.98748, -12.98663, -12.98470]), \
np.array([2.7e-4, 1.2e-4, 5.e-5, 1.7e-4, 8.e-5, 7.e-5])
))
}
# conversion from mRy to Hartree
conv_f = 5.e-4
npts = 0
ec_d = {}
for rs in etot_d:
etot_d[rs][:,1] *= conv_f
etot_d[rs][:,2] *= conv_f
npts += etot_d[rs].shape[0]
ec_d[rs] = etot_d[rs].copy()
etot_no_c = ke_ex(rs,ec_d[rs][:,0])
ec_d[rs][:,1] -= etot_no_c
return ec_d, npts
def get_AD_DMC_dat():
"""
Data from Table IV of S. Azadi and N.D. Drummond,
Phys. Rev. B 105, 245135 (2022),
DOI: 10.1103/PhysRevB.105.245135
"""
# first column is zeta, second is total energy in mHa, third is uncertainty in mHa
etot_d = {
30 : np.transpose((
np.array([0.0, 0.5, 1.0]),
-np.array([22.617,22.5862,22.4804]),
np.array([8.e-3,5.e-4,6.e-4])
)),
40 : np.transpose((
np.array([0.0, 0.5, 1.0]),
-np.array([17.612,17.597,17.555]),
np.array([4.e-3,2.e-3,2.e-3]),
)),
60 : np.transpose((
np.array([0.0, 0.5, 1.0]),
-np.array([12.254,12.2492,12.2413]),
np.array([3.e-3,4.e-4,1.e-4])
)),
80 : np.transpose((
np.array([0.0, 0.5, 1.0]),
-np.array([9.4250,9.421,9.4242]),
np.array([9.e-4,1.e-3,2.e-4])
)),
100 : np.transpose((
np.array([0.0, 0.5, 1.0]),
-np.array([7.6702,7.669976,7.6717]),
np.array([4.e-4,7.e-6,9.e-4])
))
}
npts = 0
for rs in etot_d:
etot_d[rs][:,1] *= 1.e-3 # conversion from mHa to Hartree
etot_d[rs][:,2] *= 1.e-3
etot_d[rs][:,1] -= ke_ex(rs,etot_d[rs][:,0])
npts += etot_d[rs].shape[0]
return etot_d, npts
if __name__ == "__main__":
import matplotlib.pyplot as plt
hm_dat, nhm = get_HM_QMC_dat()
ad_dat, nad = get_AD_DMC_dat()
zl = np.linspace(0.,1.,2000)
fz = ((1. + zl)**(4./3.) + (1. - zl)**(4./3.) - 2.)/(2.**(4./3.) - 2.)
plt.plot(zl,fz)
for adict in [hm_dat,ad_dat]:
for ars in adict:
fz = (adict[ars][:,1] - adict[ars][0,1])/(adict[ars][-1,1] - adict[ars][0,1])
plt.scatter(adict[ars][:,0],fz)
plt.show(); exit()
# sanity check plot, should look like Fig. 3 of Holzmann and Moroni
etot_d,_ = get_HM_QMC_dat()
colors = ['purple','darkgreen','darkblue']
for irs, rs in enumerate(etot_d):
etot_d[rs][:,1] += ke_ex(rs,etot_d[rs][:,0])
epol = rs**(3./2.)*(etot_d[rs][:,1] - etot_d[rs][0,1])*2.e3
sclucrt = rs**(3./2.)*etot_d[rs][:,2]*2.e3
plt.errorbar(etot_d[rs][:,0],epol,yerr=sclucrt,color=colors[irs],\
markersize=3,marker='o',linewidth=0,elinewidth=1.5,\
label='$r_\\mathrm{s}='+'{:}$'.format(rs))
#plt.xlim(0.,1.)
plt.ylim(0.,15.)
plt.show()
plt.close()
colors = ['r','b','g','darkblue','k']
etot_d, _ = get_AD_DMC_dat()
for irs, rs in enumerate(etot_d):
plt.errorbar(etot_d[rs][:,0],etot_d[rs][:,1],yerr=etot_d[rs][:,2],\
color=colors[irs],\
markersize=3,marker='o',linewidth=0,elinewidth=1.5,\
label='$r_\\mathrm{s}='+'{:}$'.format(rs))
plt.ylim(-9.e-3,-2.e-3)
plt.show() | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/QMC_data.py | QMC_data.py |
import numpy as np
import matplotlib.pyplot as plt
from os import path, system
from AKCK_LFF.fitted_LFF import g_plus_new, g_minus_new
from AKCK_LFF.asymptotics import get_g_minus_pars, get_g_plus_pars
from AKCK_LFF.ra_lff import g_plus_ra, g_minus_ra
from AKCK_LFF.g_corradini import g_corradini
#from AKCK_LFF.mcp07_static import mcp07_static
from AKCK_LFF.global_plot_pars import colors, lsls
pi = np.pi
rs_to_kf = (9*pi/4.)**(1./3.)
rdir = path.dirname(path.realpath(__file__)) + '/'
if not path.isdir('./figs/'):
system('mkdir ./figs')
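# gplus_plots and gminus_plots produce two-panel figures for each rs: the top
# panel shows G(q) against the QMC data and model curves, and the bottom panel
# shows 4*pi*(kF/q)**2 G(q) to expose the small- and large-q limits.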
def gplus_plots():
xl = np.linspace(0.0,4.0,5000)
xlp = xl[1:]
rs_l = [0.1,1,2,5,10,100]
for irs, rs in enumerate(rs_l):
fig, ax = plt.subplots(2,1,figsize=(5,7.5))
kf = rs_to_kf/rs
ckgpf = rdir + '/data_files/CK_Gplus_rs_{:}.csv'.format(int(rs))
if path.isfile(ckgpf):
tdat_CK = np.genfromtxt(ckgpf,delimiter=',',skip_header=1)
tq2_CK = tdat_CK[:,0]**2#(tdat_CK[:,0]*kf)**2
ax[0].errorbar(tdat_CK[:,0],tdat_CK[:,1],yerr=tdat_CK[:,2],\
color=colors['CKKC'],\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
ax[1].errorbar(tdat_CK[:,0],4.*pi*tdat_CK[:,1]/tq2_CK, \
yerr=4.*pi*tdat_CK[:,2]/tq2_CK, color=colors['CKKC'],\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
mcsgpf = rdir + '/data_files/MCS_Gplus_rs_{:}.csv'.format(int(rs))
if path.isfile(mcsgpf):
tdat_MCS = np.genfromtxt(mcsgpf,delimiter=',',skip_header=1)
tq2_MCS = tdat_MCS[:,0]**2#(tdat_MCS[:,0]*kf)**2
ax[0].errorbar(tdat_MCS[:,0],tdat_MCS[:,1], \
yerr=tdat_MCS[:,2],color=colors['MCS'],\
markersize=3,marker='s',linewidth=0,elinewidth=1.5)
ax[1].errorbar(tdat_MCS[:,0],4.*pi*tdat_MCS[:,1]/tq2_MCS, \
yerr=4.*pi*tdat_MCS[:,2]/tq2_MCS, color=colors['MCS'], \
markersize=3,marker='s',linewidth=0,elinewidth=1.5)
kf2 = kf*kf
kl2 = xlp**2#(xlp*kf)**2
a,b,c = get_g_plus_pars(rs)
ax[0].plot(xl,a*xl**2,color=colors['SQE'],linestyle=lsls['SQE'],\
label='SQE')
ax[1].plot(xlp,4.*pi*a*np.ones_like(xlp),\
color=colors['SQE'],linestyle=lsls['SQE'], label='SQE')
ax[0].plot(xl,c*xl**2 + b, \
color=colors['LQE'], linestyle=lsls['LQE'], label='LQE')
ax[1].plot(xlp,4.*pi*(c + b/kl2),\
color=colors['LQE'],linestyle=lsls['LQE'],label='LQE')
gpapp = g_plus_new(xl*kf,rs)
ax[0].plot(xl,gpapp,color=colors['NEW'], label='This work',\
linestyle=lsls['NEW'])
gpapp_oq2 = 4.*pi*g_plus_new(xlp*kf,rs)/kl2
ax[1].plot(xlp,gpapp_oq2,color=colors['NEW'],\
label='This work',linestyle=lsls['NEW'])
gp_ra = g_plus_ra(xl*kf,0.,rs)
ax[0].plot(xl,gp_ra,color=colors['RA'],linestyle=lsls['RA'],\
label='Richardson-Ashcroft')
gp_ra_oq2 = 4.*pi*g_plus_ra(xlp*kf,0.,rs)/kl2
ax[1].plot(xlp,gp_ra_oq2,color=colors['RA'],linestyle=lsls['RA'],\
label='Richardson-Ashcroft')
dens_d = {'rs': rs, 'kF': kf, 'n': 3./(4.*pi*rs**3), 'rsh': rs**(0.5)}
gcorr = g_corradini(xl*kf,dens_d)
ax[0].plot(xl,gcorr, color=colors['COR'],linestyle=lsls['COR'],\
label=r'Corradini $\mathrm{\it et \, al.}$')
ax[1].plot(xlp,4.*pi*g_corradini(xlp*kf,dens_d)/kl2, color=colors['COR'],\
linestyle=lsls['COR'], label=r'Corradini $\mathrm{\it et \, al.}$')
"""
fxc_mcp07 = mcp07_static(xl*kf,dens_d,param='PW92')
g_mcp07 = -fxc_mcp07*(xl*kf)**2/(4.*pi)
ax[0].plot(xl,g_mcp07,color='cyan',linestyle='-.',label=r'MCP07')
fxc_mcp07 = mcp07_static(xlp*kf,dens_d,param='PW92')
g_mcp07 = -fxc_mcp07*(xlp*kf)**2/(4.*pi)
ax[1].plot(xlp,g_mcp07/kl2,color='cyan',linestyle='-.',label=r'MCP07')
"""
for iplt in range(2):
ax[iplt].set_xlim(xl.min(),xl.max())
ax[1].set_xlabel('$q/k_\\mathrm{F}$',fontsize=12)
ax[0].set_ylabel('$G_+(q)$',fontsize=12)
ax[1].set_ylabel('$4\\pi \\, G_+(q) (k_\\mathrm{F}/q)^2$',fontsize=12)
if abs(rs - 0.1) < 1.e-15:
ymax0 = 1.1*max(gpapp.max(),gp_ra.max())
ymax1 = 5.
elif abs(rs - 100.) < 1.e-15:
ymax0 = 1.1*gpapp.max()
ymax1 = 5.6
elif rs <= 10.:
ymax0 = 1.1*max(gpapp.max(),gp_ra.max())
ymax1 = 1.1*max(gpapp_oq2.max(),gp_ra_oq2.max())
else:
ymax0 = 1.1*gpapp.max()
ymax1 = 1.1*gpapp_oq2.max()
ax[0].set_ylim(0.,ymax0)
ax[1].set_ylim(0.,ymax1)
if rs in [1,2]:
ileg = 0
tcoord = (0.01,0.05)
elif rs in [0.1]:
ileg = 1
tcoord = (0.9,0.9)
#elif rs in [100]:
# ileg = 1
# tcoord = (0.9, 0.05)
else:
ileg = 1
tcoord = (0.9,0.05)
ax[ileg].legend(fontsize=10,title='$r_\\mathrm{s}'+'={:}$'.format(rs),\
title_fontsize=12,frameon=False)#,ncol = 3,loc=(0.33,1.01))
ax[0].annotate('(a)',(0.01,0.9),fontsize=16,xycoords='axes fraction')
ax[1].annotate('(b)',tcoord,fontsize=16,xycoords='axes fraction')
#plt.show() ; exit()
plt.savefig('./figs/gplus_rs_{:}_2p.pdf'.format(rs), dpi=600, \
bbox_inches='tight')
plt.cla()
plt.clf()
plt.close()
return
def gminus_plots(acpars='PW92'):
xl = np.linspace(0.0,4.0,5000)
xlp = xl[1:]
rs_l = [0.1,1,2,3,4,5,100]
for irs, rs in enumerate(rs_l):
got_QMC_dat = False
ckgmf = rdir + '/data_files/CK_Gminus_rs_{:}.csv'.format(int(rs))
if path.isfile(ckgmf):
tdat = np.genfromtxt(ckgmf,delimiter=',',skip_header=1)
got_QMC_dat = True
fig, ax = plt.subplots(2,1,figsize=(5,7.5))
kf = rs_to_kf/rs
kf2 = kf*kf
kl2 = xlp**2#(xlp*kf)**2
a,b,c = get_g_minus_pars(rs,acpars=acpars)
if got_QMC_dat:
tq = tdat[:,0]#*kf
ax[0].errorbar(tdat[:,0],tdat[:,1],yerr=tdat[:,2],\
color=colors['CKKC'],\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
ax[1].errorbar(tdat[:,0],4.*pi*tdat[:,1]/tq**2, \
yerr=4.*pi*tdat[:,2]/tq**2, color=colors['CKKC'],\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
ax[0].plot(xl,a*xl**2,color=colors['SQE'],linestyle=lsls['SQE'],\
label='SQE')
ax[1].plot(xlp,4.*pi*a*np.ones_like(xlp),color=colors['SQE'],\
linestyle=lsls['SQE'], label='SQE')
ax[0].plot(xl,c*xl**2 + b,color=colors['LQE'],linestyle=lsls['LQE'],\
label='LQE')
ax[1].plot(xlp,4.*pi*(c + b/kl2), color=colors['LQE'],\
linestyle=lsls['LQE'], label='LQE')
gmapp = g_minus_new(xl*kf,rs,acpars=acpars)
ax[0].plot(xl,gmapp,color=colors['NEW'], label='This work',\
linestyle = lsls['NEW'])
gmapp_oq2 = 4.*pi*g_minus_new(xlp*kf,rs,acpars=acpars)/kl2
ax[1].plot(xlp,gmapp_oq2,color=colors['NEW'], label='This work',\
linestyle = lsls['NEW'])
gm_ra = g_minus_ra(xl*kf,0.,rs)
ax[0].plot(xl,gm_ra,color=colors['RA'],linestyle=lsls['RA'],\
label='Richardson-Ashcroft')
gm_ra_oq2 = 4.*pi*g_minus_ra(xlp*kf,0.,rs)/kl2
ax[1].plot(xlp,gm_ra_oq2, color=colors['RA'], \
linestyle=lsls['RA'], label='Richardson-Ashcroft')
for iplt in range(2):
ax[iplt].set_xlim(xl.min(),xl.max())
ax[1].set_xlabel('$q/k_\\mathrm{F}$',fontsize=12)
ax[0].set_ylabel('$G_-(q)$',fontsize=12)
ax[1].set_ylabel('$ 4 \\pi \\, G_-(q) (k_\\mathrm{F}/q)^2$', fontsize=12)
if rs <= 10.:
ymax0 = 1.1*max(gmapp.max(),gm_ra.max())
ymax1 = 1.1*max(gmapp_oq2.max(),gm_ra_oq2.max())
else:
ymax0 = 1.1*gmapp.max()
ymax1 = 1.1*gmapp_oq2.max()
ax[0].set_ylim(0.,ymax0)
ax[1].set_ylim(0.,ymax1)
#ax[0].legend(fontsize=10,title='$r_\\mathrm{s}'+'={:}$'.format(rs),\
# title_fontsize=18,ncol = 4,loc=(0.5,1.01))
if rs in [1,100]:
ileg = 0
tcoord = (0.01,0.05)
else:
ileg = 1
tcoord = (0.9,0.05)
ax[ileg].legend(fontsize=10,title='$r_\\mathrm{s}'+'={:}$'.format(rs),\
title_fontsize=12,frameon=False)
ax[0].annotate('(a)',(0.01,0.9),fontsize=16,xycoords='axes fraction')
ax[1].annotate('(b)',tcoord,fontsize=16,xycoords='axes fraction')
#plt.show() ; exit()
lstr = ''
if acpars == 'AKCK':
lstr = '_AKCK_ac'
plt.savefig('./figs/gminus_rs_{:}_2p{:}.pdf'.format(rs,lstr), dpi=600,\
bbox_inches='tight')
plt.cla()
plt.clf()
plt.close()
return
if __name__ == "__main__":
gplus_plots()
gminus_plots() | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/plot_LFFs.py | plot_LFFs.py |
import numpy as np
from AKCK_LFF.PW92 import g0_unp_pw92_pade, ec_pw92, gPW92
from AKCK_LFF.alda import alda
pi = np.pi
rs_to_kf = (9*pi/4.)**(1./3.)
gex2 = -5./(216.*pi*(3.*pi**2)**(1./3.))
def gec2(rs):
beta0 = 0.066725
ca = 3.0
cb = 1.046
cc = 0.100
cd = 1.778*cc
beta_acgga = beta0*(1 + ca*rs*(cb + cc*rs))/(1 + ca*rs*(1. + cd*rs))
return beta_acgga/16.*(pi/3.)**(1./3.)
def gexc2(rs):
return gex2 + gec2(rs)
def Bpos(rs):
a1 = 2.15
a2 = 0.435
b1 = 1.57
b2 = 0.409
rsh = rs**(0.5)
B = (1. + rsh*(a1 + a2*rs))/(3. + rsh*(b1 + b2*rs))
return B
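# get_g_plus_pars returns the coefficients (A, B, C) that fix the asymptotics
# of G+: A is the q**2 coefficient as q -> 0, obtained from the ALDA fxc
# (compressibility sum rule); C is the q**2 coefficient as q -> infinity,
# obtained from d(rs*ec)/drs; and B is the constant large-q offset from Bpos.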
def get_g_plus_pars(rs):
kf = rs_to_kf/rs
ec, d_ec_drs, _, _ = ec_pw92(rs,0.0)
fxc_alda = alda({'rs': rs, 'kF': kf, 'n': 3./(4.*pi*rs**3), 'rsh': rs**(0.5)},\
x_only=False, param='PW92')
Apos = -kf**2*fxc_alda/(4.*pi)
d_rs_ec_drs = ec + rs*d_ec_drs
C = -pi*d_rs_ec_drs/(2.*kf)
return Apos, Bpos(rs), C
def get_g_minus_pars(rs,acpars='PW92'):
kf = rs_to_kf/rs
ec, d_ec_drs, d_ec_drs2, d_ec_dz2 = ec_pw92(rs,0.)
if acpars == 'PW92':
ac = d_ec_dz2
elif acpars == 'AKCK':
ac = rev_alpha_c(rs)
Amin = (1. - 3.*pi*ac/kf)/4.
# Table 5.1
Bmin = Bpos(rs) + 2*g0_unp_pw92_pade(rs) - 1.
d_rs_ec_drs = ec + rs*d_ec_drs
C = -pi*d_rs_ec_drs/(2.*kf)
return Amin, Bmin, C
def rev_alpha_c(rs):
# current model of alpha_c(rs)
nps = [0.016886864, 0.086888870 , 10.357564711, 3.623216709, 0.439233491, 0.411840739]
return -gPW92(rs,nps)
def chi_enh(rs):
# Eq. 2.59 and Table 2.1 of Quantum Theory of Electron Liquid
rss3 = pi*rs_to_kf
kf = rs_to_kf/rs
ef = kf**2/2.
# square of Thomas-Fermi screening wavevector
ks2 = 4*kf/pi
ec, d_ec_drs, d_ec_drs2, d_ec_dz2 = ec_pw92(rs,0.)
# Eq. 5.113
return 1./(1. - rs/rss3 + 3.*d_ec_dz2/(2*ef))
def pade_g_plus(q,rs):
Ap, Bp, Cp = get_g_plus_pars(rs)
kf = rs_to_kf/rs
x2 = (q/kf)**2
Dp = -(3.*pi**2)**(4./3.)*gexc2(rs)/(2.*pi)
alp = 2.*Dp/(Ap - Cp)
bet = ( (Ap - Cp)/Bp)**2
enh = Cp + (Ap - Cp)/(1. + alp*x2 + bet*x2**2)**(0.5)
gplus = enh*x2
return gplus
if __name__ == "__main__":
rs_l = [1,2,3,4,5]
g_vmc = [1.152,1.296,1.438,1.576,1.683]
g_vmc_ucrt = [2,6,9,9,15]
from scipy.optimize import least_squares
import matplotlib.pyplot as plt
rsl = np.linspace(1.,100.,5000)
#apar, bpar, cpar = get_g_plus_pars(rsl)
    apar, bpar, cpar = get_g_minus_pars(rsl)
plt.plot(rsl,(apar - cpar)/bpar)
plt.show(); exit()
fchi = chi_enh(rsl)
imax = np.argmax(fchi)
rsmax = rsl[imax]
hmax = fchi[imax]/2.
find_right = False
for irs in range(rsl.shape[0]):
tdiff = fchi[irs] - hmax
if tdiff > 0. and (not find_right):
ileft = irs
find_right = True
elif tdiff < 0. and find_right:
iright = irs
break
hwhm = (rsl[iright] - rsl[ileft])/2.
ffn = lambda c, x : c[0]/(1. + ((x - c[1])/c[2])**2)
def obj(c):
return ffn(c,rsl) - fchi
res = least_squares(obj,[fchi[imax],rsmax,hwhm])
print(res)
plt.plot(rsl,fchi)
plt.plot(rsl,ffn(res.x,rsl))
#plt.scatter(rs_l,g_vmc)
plt.show()
exit()
tstr = ''
for irs, rs in enumerate(rs_l):
enh = chi_enh(rs)
pdiff = 200*abs(enh - g_vmc[irs])/(enh + g_vmc[irs])
tstr += '{:} & {:}({:}) & {:.6f} & {:.2f} \\\\ \n'.format(rs,\
g_vmc[irs], g_vmc_ucrt[irs], enh, pdiff)
print(tstr) | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/asymptotics.py | asymptotics.py |
import numpy as np
def g0_unp_pw92_pade(rs):
"""
see Eq. 29 of
J. P. Perdew and Y. Wang,
Phys. Rev. B 46, 12947 (1992),
https://doi.org/10.1103/PhysRevB.46.12947
and erratum Phys. Rev. B 56, 7018 (1997)
https://doi.org/10.1103/PhysRevB.56.7018
NB the erratum only corrects the value of the a3
parameter in gc(rs, zeta, kf R)
"""
alpha = 0.193
beta = 0.525
return 0.5*(1 + 2*alpha*rs)/(1 + rs*(beta + rs*alpha*beta))**2
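# gPW92 is the Perdew-Wang 1992 fitting form G(rs; v) (the same form used
# inside ec_pw92 below); dgPW92 is its analytic derivative with respect to rs.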
def gPW92(rs,v):
q0 = -2.0*v[0]*(1.0 + v[1]*rs)
rsh = rs**(0.5)
q1 = 2.0*v[0]*( rsh* (v[2] + rsh*( v[3] + rsh*( v[4] + rsh*v[5]))) )
return q0*np.log(1.0 + 1.0/q1)
def dgPW92(rs,v):
q0 = -2.0*v[0]*(1.0 + v[1]*rs)
q0p = -2.0*v[0]*v[1]
rsh = rs**(0.5)
q1 = 2.0*v[0]*( rsh* (v[2] + rsh*( v[3] + rsh*( v[4] + rsh*v[5]))) )
q1p = v[0]*( v[2]/rsh + 2.*v[3] + rsh*( 3.*v[4] + 4.*rsh*v[5] ) )
dg = q0p*np.log(1. + 1./q1) - q0*q1p/(q1*(1. + q1))
return dg
def ec_pw92(rs,z):
"""
Richardson-Ashcroft LFF needs some special derivatives of epsc, and moreover, needs them in
Rydbergs, instead of Hartree.
This routine gives those special derivatives in Rydberg
J.P. Perdew and Y. Wang,
``Accurate and simple analytic representation of the electron-gas correlation energy'',
Phys. Rev. B 45, 13244 (1992).
https://doi.org/10.1103/PhysRevB.45.13244
"""
rsh = rs**(0.5)
def g(v):
q0 = -2*v[0]*(1 + v[1]*rs)
dq0 = -2*v[0]*v[1]
q1 = 2*v[0]*(v[2]*rsh + v[3]*rs + v[4]*rs*rsh + v[5]*rs*rs)
dq1 = v[0]*(v[2]/rsh + 2*v[3] + 3*v[4]*rsh + 4*v[5]*rs)
ddq1 = v[0]*(-0.5*v[2]/rsh**3 + 3/2*v[4]/rsh + 4*v[5])
q2 = np.log(1 + 1/q1)
dq2 = -dq1/(q1**2 + q1)
ddq2 = (dq1**2*(1 + 2*q1)/(q1**2 + q1) - ddq1)/(q1**2 + q1)
g = q0*q2
dg = dq0*q2 + q0*dq2
ddg = 2*dq0*dq2 + q0*ddq2
return g,dg,ddg
unp_pars = [0.031091,0.21370,7.5957,3.5876,1.6382,0.49294]
pol_pars = [0.015545,0.20548,14.1189,6.1977,3.3662,0.62517]
alp_pars = [0.016887,0.11125,10.357,3.6231,0.88026,0.49671]
fz_den = 0.5198420997897464#(2**(4/3)-2)
fdd0 = 1.7099209341613653#8/9/fz_den
opz = np.minimum(2.,np.maximum(0.0,1.+z))
omz = np.minimum(2.,np.maximum(0.0,1.-z))
dxz = 0.5*(opz**(4./3.) + omz**(4./3.))
d_dxz_dz = 2./3.*(opz**(1./3.) - omz**(1./3.))
inftsml = 1.e-12
z_reg = np.minimum(1.-inftsml,np.maximum(-1.+inftsml,z))
d2_dxz_dz2 = 2./9.*((1. + z_reg)**(-2./3.) + (1. - z_reg)**(-2./3.))
fz = 2*(dxz - 1)/fz_den
d_fz_dz = 2*d_dxz_dz/fz_den
d2_fz_dz2 = 2*d2_dxz_dz2/fz_den
ec0,d_ec0_drs,d_ec0_drs2 = g(unp_pars)
ec1,d_ec1_drs,d_ec1_drs2 = g(pol_pars)
ac,d_ac_drs,d_ac_drs2 = g(alp_pars)
z4 = z**4
fzz4 = fz*z4
ec = ec0 - ac/fdd0*(fz - fzz4) + (ec1 - ec0)*fzz4
d_ec_drs = d_ec0_drs*(1 - fzz4) + d_ec1_drs*fzz4 - d_ac_drs/fdd0*(fz - fzz4)
d_ec_dz = -ac*d_fz_dz/fdd0 + (4*fz*z**3 + d_fz_dz*z4)*(ac/fdd0 + ec1 - ec0)
d_ec_drs2 = d_ec0_drs2*(1 - fzz4) + d_ec1_drs2*fzz4 - d_ac_drs2/fdd0*(fz - fzz4)
d_ec_dz2 = -ac*d2_fz_dz2/fdd0 + (12*fz*z**2 + 8*d_fz_dz*z**3 + d2_fz_dz2*z4) \
*(ac/fdd0 + ec1 - ec0)
return ec, d_ec_drs, d_ec_drs2, d_ec_dz2 | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/PW92.py | PW92.py |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from os import path, system#, sys
from AKCK_LFF.asymptotics import get_g_plus_pars, get_g_minus_pars
from AKCK_LFF.ra_lff import g_plus_ra, g_minus_ra
from AKCK_LFF.g_corradini import g_corradini
from AKCK_LFF.global_plot_pars import colors, lsls
#plt.rcParams.update({'text.usetex': True, 'font.family': 'dejavu'})
pi = np.pi
rs_to_kf = (9*pi/4.)**(1./3.)
vtow = {'+': 'plus', '-': 'minus'}
rdir = path.dirname(path.realpath(__file__)) + '/'
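# smooth_step(x, a, b) is a smooth switching function: it equals 1 at x = 0,
# passes through 1/2 at x = b, and decays to 0 for large x, with a setting the
# sharpness of the crossover.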
def smooth_step(x,a,b):
f1 = np.exp(a*b)
f2 = np.exp(-a*x)
f = (f1 - 1.)*f2/(1. + (f1 - 2.)*f2)
return f
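# simple_LFF is the fitted local field factor: it interpolates between the
# small-q form q**2*(A + alpha*q**4) and the large-q form B + C*q**2 (with
# q in units of kF) using smooth_step in (q/kF)**4; alpha, beta, gamma are the
# free parameters, and alpha acquires an rs dependence after the initial fits.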
def simple_LFF(q,rs,c,var,init=False,acpars='PW92'):
kf = rs_to_kf/rs
q2 = (q/kf)**2
q4 = q2*q2
if var == '+':
CA, CB, CC = get_g_plus_pars(rs)
elif var == '-':
CA, CB, CC = get_g_minus_pars(rs,acpars=acpars)
if init:
alpha = c[0]
beta = c[1]
gamma = c[2]
else:
alpha = c[0] + c[1]*np.exp(-abs(c[2])*rs)
beta = c[3]
gamma = c[4]
interp1 = smooth_step(q4/16.,beta,gamma)
interp2 = 1. - interp1
asymp1 = q2*(CA + alpha*q4)
asymp2 = CB + CC*q2
LFF = asymp1*interp1 + asymp2*interp2
return LFF
figdir = './figs_from_fit/'
pardir = './fitted_LFF_pars/'
for tdir in [figdir,pardir]:
if not path.isdir(tdir):
system('mkdir ' + tdir)
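# bootstrap estimates parameter uncertainties by resampling the
# (rs, q, G, sigma) data points with replacement nstep times, refitting each
# resampled set, and returning the standard deviation of the refitted
# parameters.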
def bootstrap(dat_d,npts,ips,var,rs_l,nstep=100,acpars='PW92'):
dat_l = np.zeros((npts,4))
ipts = 0
for akey in dat_d:
for apt in dat_d[akey]:
dat_l[ipts][0] = akey
dat_l[ipts][1] = apt[0]
dat_l[ipts][2] = apt[1]
dat_l[ipts][3] = apt[2]
ipts += 1
rs_no_fit = []
for rs in rs_l:
if rs not in dat_d:
rs_no_fit.append(rs)
nps = len(ips)
mean = np.zeros(nps)
varn = np.zeros(nps)
def gen_sim_dat():
tdx = np.random.randint(0,high=npts,size=npts)
return dat_l[tdx]
zl = np.linspace(0.0,4.0,1000)
for istep in range(nstep):
sdat = gen_sim_dat()
def tobj(c):
tres = np.zeros(npts+1)
kf = rs_to_kf/sdat[:,0]
LFF = simple_LFF(sdat[:,1]*kf, sdat[:,0], c, var, init=False, acpars=acpars)
tres[:-1] = (LFF - sdat[:,2])/sdat[:,3]
#tres[-1] = len(LFF[LFF<0.])
for rs in rs_l:
kf = rs_to_kf/rs
LFF = simple_LFF(zl*kf, rs, c, var, init=False, acpars=acpars)
tres[-1] += len(LFF[LFF<0.])
return tres
tres = least_squares(tobj,ips)
tps = tres.x
for ipar in range(nps):
mean[ipar] += tps[ipar]
varn[ipar] += tps[ipar]**2
mean /= 1.*nstep
varn /= 1.*nstep
stddev = np.maximum(0.,varn - mean**2)**(0.5)
return stddev
def main_fit(rs_l,ips0,var,acpars='PW92'):
Nps = len(ips0)
tdat = {}
tdat_CK = {}
tdat_MCS = {}
npts = 0
sgnstr = vtow[var]
for irs, rs in enumerate(rs_l):
CKLFF = rdir + '/data_files/CK_G{:}_rs_{:}.csv'.format(sgnstr,int(rs))
if path.isfile(CKLFF):
tdat_CK[rs] = np.genfromtxt(CKLFF,delimiter=',',skip_header=1)
if rs in tdat:
tdat[rs] = np.vstack((tdat[rs],tdat_CK[rs]))
else:
tdat[rs] = tdat_CK[rs].copy()
npts += tdat_CK[rs].shape[0]
if var == '+':
mcsgpf = rdir + '/data_files/MCS_Gplus_rs_{:}.csv'.format(int(rs))
if path.isfile(mcsgpf):
tdat_MCS[rs] = np.genfromtxt(mcsgpf,delimiter=',',skip_header=1)
                if rs in tdat:
                    # CK data already present for this rs; the MCS set is
                    # skipped here (unlike init_fit, which stacks both).
                    continue
                else:
                    tdat[rs] = tdat_MCS[rs].copy()
                npts += tdat_MCS[rs].shape[0]
zl = np.linspace(0.0,4.0,1000)
def fobj(c):
fres = np.zeros(npts+1)
tpts = 0
for rs in rs_l:
kf = rs_to_kf/rs
if rs in tdat:
LFF = simple_LFF(tdat[rs][:,0]*kf, rs, c, var, init=False, acpars=acpars )
fres[tpts:tpts+LFF.shape[0]] = (LFF - tdat[rs][:,1])/tdat[rs][:,2]
tpts += LFF.shape[0]
else:
LFF = simple_LFF(zl*kf,rs,c,var, acpars=acpars)
fres[-1] += len(LFF[LFF<0.])
return fres
ips = ips0.copy()
#for i in range(5):
res = least_squares(fobj,ips)
ips = (res.x).copy()
ucrt_boot = bootstrap(tdat,npts,ips.copy(),var,rs_l,nstep=1000, acpars=acpars)
# estimating error in coefficients via approximate covariance matrix
tjac = res.jac
app_hess = 0.5*np.matmul(tjac.T,tjac)
app_cov = np.linalg.inv(app_hess)
uncrt = np.zeros(Nps)
for ipar in range(Nps):
uncrt[ipar] = np.maximum(0.,app_cov[ipar,ipar])**(0.5)
tstr = ''
for ipar in range(Nps):
lchar = ', '
if ipar == Nps - 1:
lchar = ' \n'
tstr += 'c{:}{:}'.format(ipar,lchar)
tstr_tex = 'Parameter & Value & Uncertainty (Covariance) & Uncertainty (Bootstrap) \\\\ \\hline \n'
for ipar, apar in enumerate(ips):
#tstr += 'c_{:}, {:.6e} \n'.format(ipar,apar)
tmpstr = '{:.6e}'.format(apar)
fac, exp = tmpstr.split('e')
iexp = int(exp)
nfac = 6
if iexp < -1:
nfac -= iexp + 1
tstr_tex += 'c_{:}'.format(ipar)
tmpstr = ('{:.' + '{:}'.format(nfac) + 'f}').format(apar)
lchar = ', '
if ipar == Nps - 1:
lchar = ' \n'
tstr += tmpstr + lchar
tstr_tex += ' &= ' + tmpstr
for tval in [uncrt[ipar],ucrt_boot[ipar]]:
tmpstr2 = '{:.6e}'.format(tval)
fac, exp = tmpstr2.split('e')
iexp = int(exp)
nfac = 1
if iexp < -1:
nfac -= iexp + 1
tmpstr2 = ('{:.' + '{:}'.format(nfac) + 'f}').format(tval)
tstr_tex += ' & ' + tmpstr2
tstr_tex += ' \\\\ \n'
#print(tstr)
lstr = ''
if acpars == 'AKCK':
lstr = '_AKCK_ac'
with open(pardir + 'g{:}_pars{:}.csv'.format(sgnstr,lstr),'w+') as tfl:
tfl.write(tstr)
with open(pardir + 'g{:}_pars{:}.tex'.format(sgnstr,lstr),'w+') as tfl:
tfl.write(tstr_tex)
print('SSR = {:}'.format(np.sum(fobj(ips)**2)))
for irs, rs in enumerate(rs_l):
fig, ax = plt.subplots(figsize=(6,4))
kf = rs_to_kf/rs
if var == '+':
a,b,c = get_g_plus_pars(rs)
elif var == '-':
a,b,c = get_g_minus_pars(rs,acpars=acpars)
if rs in tdat_CK:
ax.errorbar(tdat_CK[rs][:,0],tdat_CK[rs][:,1],yerr=tdat_CK[rs][:,2],color='k',\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
if rs in tdat_MCS:
ax.errorbar(tdat_MCS[rs][:,0],tdat_MCS[rs][:,1],yerr=tdat_MCS[rs][:,2],color='m',\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
ax.plot(zl,a*zl**2,color=colors['SQE'],linestyle=lsls['SQE'],\
label='SQE')
ax.plot(zl,c*zl**2+b,color=colors['LQE'],linestyle=lsls['LQE'],\
label='LQE')
new_LFF = simple_LFF(zl*kf,rs,ips,var, acpars=acpars)
ax.plot(zl,new_LFF,color=colors['NEW'],label='This work',\
linestyle=lsls['NEW'])
if var == '+':
RA_LFF = g_plus_ra(zl*kf,0.,rs)
gcorr = g_corradini(zl*kf,\
{'rs': rs, 'kF': kf, 'n': 3./(4.*pi*rs**3), 'rsh': rs**(0.5)})
ax.plot(zl,gcorr,\
color=colors['COR'],linestyle=lsls['COR'],\
label=r'Corradini $\mathrm{\it et \, al.}$')
elif var == '-':
RA_LFF = g_minus_ra(zl*kf,0.,rs)
ax.plot(zl,RA_LFF,color=colors['RA'],linestyle=lsls['RA'],\
label='Richardson-Ashcroft')
ax.set_xlim(zl.min(),zl.max())
ax.set_xlabel('$q/k_\\mathrm{F}$',fontsize=12)
ax.set_ylabel('$G_'+var+'(q)$',fontsize=12)
ax.set_ylim(0.,1.1*new_LFF.max())
ax.legend(fontsize=10,\
title='$r_\\mathrm{s}'+'={:}$'.format(rs),\
title_fontsize=18,frameon=False)
plt.savefig(figdir + 'g{:}_rs_{:}{:}.pdf'.format(\
sgnstr, rs, lstr), dpi=600,bbox_inches='tight')
plt.cla()
plt.clf()
plt.close()
return
def init_fit(rs_l,var, acpars='PW92'):
tdat = {}
tdat_CK = {}
tdat_MCS = {}
npts = 0
tstr = 'rs, c0, c1, c2 \n'
zl = np.linspace(0.0,4.0,1000)
sgnstr = vtow[var]
for irs, rs in enumerate(rs_l):
CKLFF = rdir + '/data_files/CK_G{:}_rs_{:}.csv'.format(sgnstr,int(rs))
if path.isfile(CKLFF):
tdat_CK[rs] = np.genfromtxt(CKLFF,delimiter=',',skip_header=1)
if rs in tdat:
tdat[rs] = np.vstack((tdat[rs],tdat_CK[rs]))
else:
tdat[rs] = tdat_CK[rs].copy()
npts += tdat_CK[rs].shape[0]
if var == '+':
mcsgpf = rdir + '/data_files/MCS_Gplus_rs_{:}.csv'.format(int(rs))
if path.isfile(mcsgpf):
tdat_MCS[rs] = np.genfromtxt(mcsgpf,delimiter=',',skip_header=1)
if rs in tdat:
tdat[rs] = np.vstack((tdat[rs],tdat_MCS[rs]))
else:
tdat[rs] = tdat_MCS[rs].copy()
npts += tdat_MCS[rs].shape[0]
def fobj(c):
fres = np.zeros(npts+1)
tpts = 0
for rs in rs_l:#tdat:
kf = (9*pi/4.)**(1./3.)/rs
if rs in tdat:
LFF = simple_LFF(tdat[rs][:,0]*kf,rs,c,var,init=True, acpars=acpars)
fres[tpts:tpts+LFF.shape[0]] = (LFF - tdat[rs][:,1])/tdat[rs][:,2]
tpts += LFF.shape[0]
else:
LFF = simple_LFF(zl*kf,rs,c,var,init=True, acpars=acpars)
fres[-1] += len(LFF[LFF<0.])
return fres
res = least_squares(fobj,[.8,1.7,0.])
tstr += ('{:}, '*3 + '{:}\n').format(rs,*res.x)
lstr = ''
if acpars == 'AKCK':
lstr = '_AKCK_ac'
with open(pardir + 'optpars_g'+sgnstr+lstr+'.csv','w+') as tfl:
tfl.write(tstr)
return
def manip(rs,var,acpars='PW92'):
from matplotlib.widgets import Slider
zl = np.linspace(0.0,4.0,1000)
tdat_CK = {}
tdat_MCS = {}
sgnstr = vtow[var]
CKLFF = rdir + '/data_files/CK_G{:}_rs_{:}.csv'.format(sgnstr,int(rs))
if path.isfile(CKLFF):
tdat_CK[rs] = np.genfromtxt(CKLFF,delimiter=',',skip_header=1)
if var == '+':
mcsgpf = rdir + '/data_files/MCS_Gplus_rs_{:}.csv'.format(int(rs))
if path.isfile(mcsgpf):
tdat_MCS[rs] = np.genfromtxt(mcsgpf,delimiter=',',skip_header=1)
fig, ax = plt.subplots(figsize=(6,6))
fig.subplots_adjust(bottom=0.25)
kf = rs_to_kf/rs
if var == '+':
a,b,c = get_g_plus_pars(rs)
elif var == '-':
a,b,c = get_g_minus_pars(rs,acpars=acpars)
if rs in tdat_CK:
ax.errorbar(tdat_CK[rs][:,0],tdat_CK[rs][:,1],yerr=tdat_CK[rs][:,2],color='k',\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
if rs in tdat_MCS:
ax.errorbar(tdat_MCS[rs][:,0],tdat_MCS[rs][:,1],yerr=tdat_MCS[rs][:,2],color='m',\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
ax.plot(zl,a*zl**2,color='darkorange',linestyle='--')
ax.plot(zl,c*zl**2+b,color='tab:green',linestyle='-.')
a0 = 0.05
b0 = 0.75
g0 = 2.58
twrap = lambda ps : simple_LFF(zl*kf,rs,ps,var,init=True, acpars=acpars)
line, = ax.plot(zl,twrap([a0,b0,g0]),color='darkblue')
#line, = ax.plot(zl,gplus_zeropar(zl*kf,rs,gamma=a0),color='darkorange')
ax.set_xlim(zl.min(),zl.max())
ax.set_xlabel('$q/k_\\mathrm{F}$',fontsize=12)
ax.set_ylabel('$G_'+var+'(q)$',fontsize=12)
ax.set_ylim(0.,2.0)
a_ax = fig.add_axes([0.15, 0.12, 0.65, 0.03])
a_adj = Slider(
ax=a_ax,
label='$\\alpha$',
valmin=-6.0,
valmax=6.0,
valinit=a0
)
b_ax = fig.add_axes([0.15, 0.08, 0.65, 0.03])
b_adj = Slider(
ax=b_ax,
label='$\\beta$',
valmin=0.0,
valmax=6.0,
valinit=b0
)
g_ax = fig.add_axes([0.15, 0.04, 0.65, 0.03])
g_adj = Slider(
ax=g_ax,
label='$\\gamma$',
valmin=0.0,
valmax=6.0,
valinit=g0
)
def update_plot(val):
line.set_ydata(twrap([a_adj.val,b_adj.val,g_adj.val]))
fig.canvas.draw_idle()
a_adj.on_changed(update_plot)
b_adj.on_changed(update_plot)
g_adj.on_changed(update_plot)
plt.show() ; exit()
"""
def fitparser():
uargs = {'routine': None, 'var': None}
if len(sys.argv) < 1 + len(uargs.keys()):
qstr = 'Need to specify:\n'
for akey in uargs:
qstr += ' ' + akey + '\n'
raise SystemExit(qstr)
for anarg in sys.argv[1:]:
tkey, tval = anarg.split('=')
uargs[tkey.lower()] = tval.lower()
if uargs['routine'] == 'main':
rs_l = [1.e-6,0.01,0.1,1,2,3,4,5,10,69,100]
if uargs['var'] == '+':
ips = [-0.00365479, 0.0215642, 0.182898, 4.5, 1.2]
elif uargs['var'] == '-':
ips = [-0.00456264, 0.0261967, 0.338185, 0.65, 1.8]
main_fit(rs_l,ips,uargs['var'])
elif uargs['routine'] == 'init':
if uargs['var'] == '+':
rs_l = [1,2,5,10]
elif uargs['var'] == '-':
rs_l = [1,2,3,4,5]
init_fit(rs_l,uargs['var'])
elif uargs['routine'] == 'manip':
manip(float(uargs['rs']),uargs['var'])
return
"""
def fitparser(routine, var, rs = None, acpars='PW92'):
if routine == 'main':
rs_l = [1.e-6,0.01,0.1,1,2,3,4,5,10,69,100]
if var == '+':
ips = [-0.00365479, 0.0215642, 0.182898, 4.5, 1.2]
elif var == '-':
ips = [-0.00456264, 0.0261967, 0.338185, 0.65, 1.8]
main_fit(rs_l,ips,var,acpars=acpars)
elif routine == 'init':
if var == '+':
rs_l = [1,2,5,10]
elif var == '-':
rs_l = [1,2,3,4,5]
init_fit(rs_l,var,acpars=acpars)
elif routine == 'manip':
manip(rs,var,acpars=acpars)
return
if __name__ == "__main__":
fitparser('manip','+',1.) | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/fit_LFF.py | fit_LFF.py |
import numpy as np
from os import path
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from AKCK_LFF.corr import get_ec_GK, ec_rpa_unp
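# gen_RPA_dat scans wavevector and frequency cutoffs at each rs until the RPA
# correlation energy from get_ec_GK agrees with the PW92 RPA parameterization
# (ec_rpa_unp) to about 1%, and stores the converged cutoffs;
# gen_RPA_cutoffs then fits simple piecewise forms to those cutoffs.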
def gen_RPA_dat():
rsl = [1.,2.,3.,4.,5.,10.,20.,40.,60.,80.,100.,120.]
datl = np.zeros((len(rsl),5))
uc = 4.0
qc = 4.0
maxinc = 100
for irs, rs in enumerate(rsl):
lpe = 1e20
for iq in range(maxinc):
for iu in range(maxinc):
ec_rpa = get_ec_GK(rs,fxc='RPA',uc=uc,qc=qc)
tpe = 100*(1. - ec_rpa/ec_rpa_unp(rs))
if abs(tpe) < 1.:
break
#rel_pe = 200*abs(lpe - tpe)/max(1.e-12,abs(lpe+tpe))
adiff = abs(lpe - tpe)
if adiff < .05:
break
print(rs,qc,uc,tpe,adiff)
lpe = tpe
uc += .5
if abs(tpe) < 1.:
break
qc += 0.5
datl[irs,0] = rs
datl[irs,1] = qc
datl[irs,2] = uc
datl[irs,3] = ec_rpa
datl[irs,4] = tpe
print(rs, qc, uc, ec_rpa, tpe)
np.savetxt('./ec_data/RPA_cutoffs.csv',datl[:irs+1,:],delimiter=',',\
header='rs, q cutoff (1/kf) ,freq cutoff (1/kf**2), ec_rpa, PE (%)')
return
def qcut(x,c):
f = np.zeros(x.shape)
tmsk = x <= 5.
f[tmsk] = c[0] + c[1]*x[tmsk]
tmsk = (5. < x) & (x <= 60.)
f[tmsk] = c[0] + 5.*c[1] + c[2]*(x[tmsk] - 5.) + c[3]*(x[tmsk] - 5.)**2
tmsk = (60. < x)
f[tmsk] = c[0] + 5.*c[1] + 55.*(c[2] + 55.*c[3]) + c[4]*(x[tmsk] - 60.)
return f
def gen_RPA_cutoffs():
if not path.isfile('./ec_data/RPA_cutoffs.csv'):
gen_RPA_dat()
tdat = np.genfromtxt('./ec_data/RPA_cutoffs.csv',delimiter=',',skip_header=1)
def freq_cut(x,c):
bkpt = 40.
ff = np.zeros(x.shape)
tmsk = x <= bkpt
ff[tmsk] = c[0] + c[1]*x[tmsk]**c[2]
tmsk = x > bkpt
ff[tmsk] = c[0] + c[1]*bkpt**c[2] + (x[tmsk] - bkpt)**c[3]
return ff
fobj = lambda c : freq_cut(tdat[:,0],c) - tdat[:,2]
fres = least_squares(fobj,np.ones(4))
print(fres)
print(*fres.x)
qobj = lambda c : qcut(tdat[:,0],c) - tdat[:,1]
qres = least_squares(qobj,np.ones(5))
print(qres)
print(*qres.x)
rsl = np.linspace(.1,130.,4000)
plt.scatter(tdat[:,0],tdat[:,2],color='darkblue')
plt.plot(rsl,freq_cut(rsl,fres.x),color='darkblue')
plt.show()
plt.scatter(tdat[:,0],tdat[:,1],color='darkorange')
plt.plot(rsl,qcut(rsl,qres.x),color='darkorange')
plt.xscale('log')
plt.yscale('log')
plt.show()
tstr = 'FREQ cut pars:\n'
for ipar, apar in enumerate(fres.x):
lchar = ', '
if ipar == len(fres.x)-1:
lchar = '\n\n'
tstr += '{:.6f}{:}'.format(apar,lchar)
tstr += 'WVVCTR cut pars:\n'
for ipar, apar in enumerate(qres.x):
lchar = ', '
if ipar == len(qres.x)-1:
lchar = '\n'
tstr += '{:.6f}{:}'.format(apar,lchar)
print(tstr)
return
if __name__ == "__main__":
gen_RPA_cutoffs() | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/fit_RPA_cutoffs.py | fit_RPA_cutoffs.py |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from AKCK_LFF.g_corradini import g_corradini
from AKCK_LFF.fitted_LFF import g_plus_new, g_minus_new
from AKCK_LFF.ra_lff import g_plus_ra, g_minus_ra
plt.rcParams.update({'text.usetex': True, 'font.family': 'dejavu'})
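# surf_plots draws side-by-side 3D surfaces of 4*pi*(kF/q)**2 * G over
# rs in [1, 10] and q/kF in (0, 4], comparing the present G+/G- with the
# Corradini et al. and static Richardson-Ashcroft models.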
def surf_plots():
rs_l = np.linspace(1.,10.,5000)
kf_l = (9*np.pi/4.)**(1./3.)/rs_l
x_l = np.linspace(0.01,4.,2001)
x, rs = np.meshgrid(x_l, rs_l)
q = x.copy()
dv = {'rs': rs.copy(), 'rsh': rs**(0.5), 'kF': (9*np.pi/4.)**(1./3.)/rs,
'n' : 3./(4.*np.pi*rs**3)}
for irs in range(rs_l.shape[0]):
q[irs,:] *= kf_l[irs]
for ig in range(3):
fig, ax = plt.subplots(1,2,figsize=(8,4),subplot_kw={"projection": "3d"})
if ig == 2:
gfn = g_minus_new(q,rs)
gsymb = '-'
fsymb = 'm'
ym = 2.5
else:
gfn = g_plus_new(q,rs)
gsymb = '+'
fsymb = 'p'
ym = 4.
tfac = 4*np.pi/x**2
ax[0].plot_surface(x, rs, tfac*gfn, cmap=cm.viridis,\
linewidth=0, antialiased=False,rasterized=True)
ax[0].text(-.6,6.2,1.25*ym,'(a) This work',fontsize=14)
if ig == 0:
gfn = g_corradini(q,dv)
tlab = '(b) Corradini et al.'
modstr = 'corr'
elif ig == 1:
gfn = g_plus_ra(q,0.,rs)
tlab = '(b) Static RA'
modstr = 'RAS'
elif ig == 2:
gfn = g_minus_ra(q,0.,rs)
tlab = '(b) Static RA'
modstr = 'RAS'
ax[1].plot_surface(x, rs, tfac*gfn, cmap=cm.viridis,\
linewidth=0, antialiased=False,rasterized=True)
ax[1].text(-.6,6.2,1.25*ym, tlab,fontsize=14)
for iax in range(2):
ax[iax].set_xlabel('$q/k_\\mathrm{F}$',fontsize=14)
ax[iax].set_ylabel('$r_\\mathrm{s}$',fontsize=14)
#ax[iax].set_zlabel('$4\\pi \\, G_'+gsymb+'(r_\\mathrm{s},q)(k_\\mathrm{F}/q)^2$',fontsize=14)
ax[iax].set_zlabel('$4\\pi (k_\\mathrm{F}/q)^2 G_'+gsymb+'$',fontsize=14)
ax[iax].view_init(elev=20,azim=-70)
ax[iax].set_zlim(0.,ym)
#plt.show() ; exit()
plt.savefig('./figs/g{:}_{:}.pdf'.format(fsymb,modstr), \
dpi=600, bbox_inches='tight')
plt.cla()
plt.clf()
plt.close()
return
if __name__ == "__main__":
surf_plots() | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/surf_plots.py | surf_plots.py |
import numpy as np
def trap(fun,bds,opt_d={},args=(),kwargs={}):
prec=1.e-10
if 'prec' in opt_d:
prec = opt_d['prec']
simpson=False
if 'simpson' in opt_d:
simpson = opt_d['simpson']
h = (bds[1]-bds[0])/2.
    max_depth = 40 # minimum step size is (bds[1]-bds[0])/2**(max_depth+1)
min_depth = 2
prev_h_int = -1e20
otsum = -1e20
wrapfun = lambda x : fun(x,*args,**kwargs)
tsum = 0.5*h*np.sum(wrapfun(np.asarray([bds[0],bds[1]])))
for iter in range(max_depth):
m_l = np.arange(bds[0]+h,bds[1],2*h)
tsum += h*np.sum(wrapfun(m_l))
if simpson:
ttsum = tsum
tsum = (4*ttsum - otsum)/3.0
if abs(prev_h_int - tsum) < prec*abs(prev_h_int) and iter > min_depth-1:
return tsum, {'code':1,'error': abs(prev_h_int - tsum) ,'step':h}
else:
l_err = abs(prev_h_int - tsum)
prev_h_int = tsum
if simpson:
otsum = ttsum
tsum = ttsum/2.0
else:
tsum /= 2.0 # reuse previous integrated value
h/=2.0 # halve the step size
        if iter == max_depth-1:
            # loop finished without reaching the requested precision
return tsum, {'code':0,'error': l_err, 'step': h }
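# r(u) and g(u) are the auxiliary integrands below; integrate_funs extends the
# upper integration limit until both integrals converge and returns the
# weighted average <ln r> = int r*g*ln(r) du / int r*g du.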
def r(u):
return 1. - u*( np.sign(u)*np.pi/2. - np.arctan(u) )
def g(u):
return -1./(9.*(1. + u**2)**2)
def num_integrand(u):
return r(u)*g(u)*np.log(r(u))
def den_integrand(u):
return r(u)*g(u)
def find_cut():
ul = np.linspace(0.,10.,1000)
i1 = num_integrand(ul)
i1max = np.abs(i1).max()
for iu in range(1,ul.shape[0]):
if abs(i1[iu]/i1max) < 1.e-3:
i1_cut = ul[iu]
break
i2 = den_integrand(ul)
i2max = np.abs(i2).max()
for iu in range(1,ul.shape[0]):
if abs(i2[iu]/i2max) < 1.e-3:
#print(ul[iu],i2[iu])
i2_cut = ul[iu]
break
return i1_cut, i2_cut
def integrate_funs():
i1_cut, i2_cut = find_cut()
prec = 1.e-10
oldval = 1.e20
for ibd in range(1,50):
i1val, msg = trap(num_integrand,(0.,ibd*i1_cut),{'prec': prec/10.})
if abs(i1val - oldval) < prec*abs(oldval):
print('DONE numerator\n',i1val,ibd,i1_cut)
print(msg)
break
oldval = i1val
oldval = 1.e20
for ibd in range(1,50):
i2val, msg = trap(den_integrand,(0.,ibd*i2_cut),{'prec': prec/10.})
if abs(i2val - oldval) < prec*abs(oldval):
print('DONE denominator\n',i2val,ibd,i2_cut)
print(msg)
break
oldval = i2val
ravg = i1val / i2val
return ravg
if __name__ == "__main__":
ravg = integrate_funs()
print(ravg) | AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/alpha_c_c1.py | alpha_c_c1.py |
import numpy as np
from scipy.linalg import eigh_tridiagonal as tridiag
from os import path, system
qdir = './quad_grids/'
if not path.isdir(qdir):
system('mkdir -p ' + qdir)
ceil = lambda x : int(np.ceil(x))
floor = lambda x : int(np.floor(x))
def GLQ(M):
"""
Gauss-Legendre quadrature of order M
"""
fname = qdir + '/GLQ_{:}.csv'.format(M)
# algorithm from Golub and Welsch, Math. Comp. 23, 221 (1969)
def beta_GL_udiag(n):# coefficients from NIST's DLMF, sec. 18.9
an = (2*n+1.0)/(n+1.0)
anp1 = (2*n+3.0)/(n+2.0)
cnp1 = (n+1.0)/(n+2.0)
return (cnp1/an/anp1)**(0.5)
jac_diag = np.zeros(M)
jac_udiag = np.zeros(M-1)
jac_udiag = beta_GL_udiag(1.*np.arange(0,M-1,1))
grid,v = tridiag(jac_diag,jac_udiag)
wg = 2.0*v[0]**2
np.savetxt(fname,np.transpose((grid,wg)),delimiter=',',\
header='grid point, weight',fmt='%.16f')
return grid, wg
def gauss_kronrod(n):
# adapted from Dirk P. Laurie,
# CALCULATION OF GAUSS-KRONROD QUADRATURE RULE
# Mathematics of Computation 66, 1133 (1997).
# doi:10.1090/S0025-5718-97-00861-2
def coeff(n):
an = (2*n+1.0)/(n+1.0)
alp = 0.0
anp1 = (2*n+3.0)/(n+2.0)
cnp1 = (n+1.0)/(n+2.0)
return alp,(cnp1/an/anp1)#**(0.5)
a = np.zeros(2*n+1)
b = np.zeros(2*n+1)
b[0]=2.0
for jn in range(ceil(3*n/2.0)+1):
if jn < int(3*n/2.0):
a[jn],b[jn+1] = coeff(jn)
else:
_,b[jn+1] = coeff(jn)
gl_grid,gl_v = tridiag(a[:n],b[1:n]**(0.5))
gl_wg=2.0*gl_v[0]**2
t = np.zeros(floor(n/2.0)+2)
s = np.zeros(floor(n/2.0)+2)
t[1] = b[n+1]
for m in range(n-1):
u = 0.0
for k in range(floor((m+1.0)/2.0),-1,-1):
l = m-k
u += (a[k+n+1] - a[l])*t[k+1] + b[k+n+1]*s[k] - b[l]*s[k+1]
s[k+1] = u
ts = s
s = t
t = ts
for j in range(floor(n/2.0),-1,-1):
s[j+1] = s[j]
for m in range(n-1,2*n-2):
u = 0.0
for k in range(m+1-n,floor((m-1.0)/2.0)+1):
l = m - k
j = n - 1 -l
u += -(a[k+n+1] - a[l])*t[j+1] - b[k+n+1]*s[j+1] + b[l]*s[j+2]
s[j+1] = u
if m%2 == 0:
k = int(m/2)
a[k+n+1] = a[k] + (s[j+1] - b[k+n+1]*s[j+2])/t[j+2]
else:
k = int((m+1)/2)
b[k+n+1] = s[j+1]/s[j+2]
ts = s
s = t
t = ts
a[2*n] = a[n-1] - b[2*n]*s[1]/t[1]
grid,v = tridiag(a,b[1:]**(0.5))#
wg = b[0]*v[0]**2
glwg = np.zeros(wg.shape)
for ipt,pt in enumerate(grid):
for jp,pp in enumerate(gl_grid):
if abs(pp-pt)<1.e-12:
glwg[ipt] = gl_wg[jp]
np.savetxt(qdir+'GKQ_'+str(2*n+1)+'_pts.csv', \
np.transpose((grid,wg,wg - glwg)), delimiter=',',\
header='GK point, GK weight, GK - GL weight',fmt='%.16f')
return
def switch_elts(arr,i1,i2):
# switches the i1 and i2 elements of array arr
tmp = arr[i1].copy()
arr[i1] = arr[i2].copy()
arr[i2] = tmp.copy()
return arr
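# GK_GA_PINF integrates fun on [lbd, infinity): it searches for a breakpoint
# beyond which the integrand is negligible, integrates [lbd, bkpt] directly
# with GK_global_adap, maps the tail to a finite interval via x -> 1/x, and
# linearly extrapolates the mapped tail integral to a zero lower limit to
# remove the cutoff dependence.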
def GK_GA_PINF(fun,lbd,opt_d={},args=(),kwargs={}):
# for integrating a function from lbd to infinity
dopts = {'prec': 1.e-8, 'npts': 3, 'min_recur': 2, 'max_search': 1000}
for anopt in opt_d:
dopts[anopt] = opt_d[anopt]
wfun = lambda x : fun(x,*args,**kwargs)
if 'breakpoint' in dopts:
bkpt = dopts['breakpoint']
else:
bkpt = max(1.e-3,lbd)
tfun = wfun(bkpt)
ofun = tfun
tscp = 1.5
for isrch in range(dopts['max_search']):
bkpt *= tscp
cfun = wfun(bkpt)
if abs(cfun/tfun) < dopts['prec'] or \
abs(cfun - ofun)/abs(cfun + ofun) < dopts['prec']:
break
ofun = cfun
#tfun = cfun
dopts['prec'] /= 2.
igrl1, msg1 = GK_global_adap(wfun,(lbd,bkpt),opt_d=dopts)
wifun = lambda x : fun(1./x, *args, **kwargs)/x**2
igrl2, msg2 = GK_global_adap(wifun,(1./(2.*bkpt),1./bkpt),opt_d=dopts)
igrl3, msg3 = GK_global_adap(wifun,(1./(4.*bkpt),1./bkpt),opt_d=dopts)
#igrl4, msg4 = GK_global_adap(wifun,(dopts['prec']/10.,1./bkpt),opt_d=dopts)
slope = 4*bkpt*(igrl2 - igrl3)
icpt = igrl2 - slope/(2.*bkpt)
if abs(icpt/max(1.e-12,igrl1)) > 1.e2:
print('GK_GA_PINF FATAL!! Extrapolated improper integral much larger than lower range')
print(igrl1,args,abs(icpt/max(1.e-12,igrl1)))
exit()
od = {'code_cut': msg1['code'], 'code_upper_2cut': msg2['code'],
'code_upper_4cut': msg3['code'],
'error_lower': msg1['error'], 'error_upper_2cut': msg2['error'],
'error_upper_4cut': msg3['error'],
'integral_lower': igrl1, 'integral_upper_2cut': igrl2,
'integral_upper_4cut': igrl3,
'extrap_integral_upper': icpt, 'extrap_slope': slope
}
return igrl1 + icpt, od
def GK_global_adap(fun,bds,opt_d={},args=(),kwargs={}):
"""
error codes:
> 0 successful integration
1 absolutely no issues
            <= 0 unsuccessful integration:
0 exceeded maximum number of steps
-1 NaN error (errors are NaN)
-2 Bisection yielded regions smaller than machine precision
-3 Result was below machine precision, estimating as zero
"""
meps = abs(7/3-4/3-1) # machine precision
wrapfun = lambda x : fun(x,*args,**kwargs)
lbd,ubd = bds
def_pts = 5
prec = 1.0e-8
if 'prec' in opt_d:
prec = opt_d['prec']
min_recur = 2
if 'min_recur' in opt_d:
min_recur = opt_d['min_recur']
if 'max_recur' in opt_d:
max_recur = opt_d['max_recur']
else:
# 2**max_recur bisections yields a region of width 10**(-60)
max_recur = ceil((np.log(abs(bds[1]-bds[0])) + 60*np.log(10.0))/np.log(2.0))
max_recur = max(max_recur,1000)
if 'npts' in opt_d:
npts = opt_d['npts']
else:
npts = def_pts
if 'error monitoring' not in opt_d:
opt_d['error monitoring'] = False
if 'err_meas' not in opt_d:
opt_d['err_meas'] = 'abs_diff'
if 'rel_tol' in opt_d:
rel_tol = opt_d['rel_tol']
else:
rel_tol = min(0.01,100*prec)
def_grid = qdir + '/GKQ_'+str(2*npts+1)+'_pts.csv'
if not path.isfile(def_grid) or path.getsize(def_grid)==0:
gauss_kronrod(npts) # returns 2*N + 1 points
mesh,wg,wg_err = np.transpose(np.genfromtxt(def_grid,delimiter=',',skip_header=1))
if 'reg' in opt_d:
working_regs = []
for iareg,areg in enumerate(opt_d['reg']):
if iareg == 0:
working_regs.append([lbd,areg[1]])
elif iareg == len(opt_d['reg'])-1:
working_regs.append([areg[0],ubd])
else:
working_regs.append(areg)
else:
treg = np.linspace(lbd,ubd,min_recur+1)
working_regs = []
for ibord in range(len(treg)-1):
working_regs.append([treg[ibord],treg[ibord+1]])
reg_l = np.zeros((2*max_recur+1,2))
err_l = np.zeros(2*max_recur+1)
sum_l = np.zeros(2*max_recur+1)
ipos = -1
for irecur in range(max_recur):
for ireg, areg in enumerate(working_regs):
ipos += 1
x_mesh = 0.5*(areg[1]-areg[0])*mesh + 0.5*(areg[1]+areg[0])
x_wg = 0.5*(areg[1]-areg[0])*wg
x_wg_err = 0.5*(areg[1]-areg[0])*wg_err
tvar = wrapfun(x_mesh)
tint = np.sum(x_wg*tvar)
tint_err = abs(np.sum(x_wg_err*tvar))
reg_l[ipos] = areg.copy() #np.vstack((reg_l,areg))
sum_l[ipos] = tint#np.append(sum_l,tint)
if opt_d['err_meas']=='quadpack':
"""
empirical error measure from:
R. Piessens, E. de Doncker-Kapenga, C. W. Uberhuber, and D. K. Kahaner
``QUADPACK: A Subroutine Package for Automatic Integration''
Springer-Verlag, Berlin, 1983.
doi: 10.1007/978-3-642-61786-7
"""
fac = np.sum(x_wg*np.abs(tvar - tint/(areg[1]-areg[0])))
gk_err = tint_err
if fac == 0.0:
cloc_err = 0.0
else:
cloc_err = fac*min(1.0,(200*gk_err/fac)**(1.5))
#err_l[irecur+ireg] np.append(err_l,lerr_meas)
elif opt_d['err_meas']=='abs_diff' or opt_d['err_meas']=='global_rel':
#err_l = np.append(err_l,tint_err)
cloc_err = tint_err
elif opt_d['err_meas']=='local_rel':
#err_l = np.append(err_l,tint_err/max(meps,abs(tint)))
cloc_err = tint_err/max(meps,abs(tint))
err_l[ipos] = cloc_err
csum = np.sum(sum_l[:ipos+1])
cprec = max(meps,min(prec,abs(csum)/2))
if opt_d['err_meas']=='global_rel':
global_error = np.sum(err_l)/max(meps,csum)
else:
global_error = np.sum(err_l)
if opt_d['error monitoring']:
print(global_error,csum)
if abs(csum)< meps:
return 0.0,{'code':-3,'error':global_error}
if global_error != global_error: # if the output is NaN, completely failed
return csum,{'code':-1,'error':global_error}
if global_error < cprec: # SUCCESS!!!!
return csum,{'code':1,'error':global_error}
else:
#inds = np.argsort(err_l)
ibad = np.argmax(err_l)
bad_reg = reg_l[ibad].copy()
bad_err = err_l[ibad].copy()
err_l = switch_elts(err_l,ipos,ibad)
reg_l = switch_elts(reg_l,ipos,ibad)
sum_l = switch_elts(sum_l,ipos,ibad)
ipos -= 1
#err_l = err_l[inds][:-1]
#reg_l = reg_l[inds][:-1]
#sum_l = sum_l[inds][:-1]
mid = (bad_reg[0] + bad_reg[1])/2.0 # recursive bisection of highest error region
if abs(bad_reg[1]-bad_reg[0])< meps or abs(bad_reg[1]-mid)< meps \
or abs(bad_reg[0]-mid)< meps:
# bisection yields differences below machine precision, integration failed
return csum,{'code':-2,'error':global_error}
working_regs = [[bad_reg[0],mid],[mid,bad_reg[1]]]
if irecur == max_recur-1:
if abs(csum)<meps:
return 0.0,{'code':-3,'error':global_error}
else:
return csum,{'code':0,'error':global_error}
if __name__ == "__main__":
tfun = lambda x : np.exp(-x**2)
igrl, msg = GK_GA_PINF(tfun,0.,opt_d={},args=(),kwargs={})
#igrl,msg = GK_global_adap(tfun,(-1.,.7),opt_d={},args=(),kwargs={})
print(igrl,igrl - (np.pi)**(0.5)/2.)
print(msg)

| AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/gauss_quad.py | gauss_quad.py |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from scipy.optimize import least_squares, bisect
from os import path,system
from AKCK_LFF.PZ81 import chi_enh_pz81
from AKCK_LFF.PW92 import ec_pw92, gPW92, dgPW92
from AKCK_LFF.QMC_data import get_ck_chi_enh, ke_ex
bdir = './stiffness_refit/'
if not path.isdir(bdir):
system('mkdir ' + bdir)
pi = np.pi
plt.rcParams.update({'text.usetex': True, 'font.family': 'dejavu'})
"""
Eqs. 4.9 - 4.10 of
S.H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980);
doi: 10.1139/p80-159
"""
kf_to_rs = (9.*pi/4.)**(1./3.)
c0_alpha = -1./(6.*pi**2)
PT_integral = 0.5315045266#0.531504
c1_alpha = (np.log(16.*pi*kf_to_rs) - 3. + PT_integral )/(6.*pi**2)
def get_exp_pars(A,alpha1,beta1,beta2,beta3,beta4):
c0 = A
c1 = -2*c0*np.log(2.*c0*beta1)
c2 = A*alpha1
c3 = -2*A*(alpha1*np.log(2*A*beta1) - (beta2/beta1)**2 + beta3/beta1)
d0 = alpha1/beta4
d1 = alpha1*beta3/beta4**2
return c0, c1, c2, c3, d0, d1
def spinf(z,pow):
opz = np.minimum(2,np.maximum(0.0,1+z))
omz = np.minimum(2,np.maximum(0.0,1-z))
return (opz**pow + omz**pow)/2.0
def ts(rs,z):
# kinetic energy per electron
ts0 = 3./10.*(kf_to_rs/rs)**2
ds = spinf(z,5./3.)
return ts0*ds
def epsx(rs,z):
# exchange energy per electron
ex0 = -3./(4.*pi)*kf_to_rs/rs
dx = spinf(z,4./3.)
return ex0*dx
def epsc_PW92_rev(rs,z,ps):
ec0 = gPW92(rs,[0.031091,0.21370,7.5957,3.5876,1.6382,0.49294])
ec1 = gPW92(rs,[0.015545,0.20548,14.1189,6.1977,3.3662,0.62517])
mac = gPW92(rs,ps)
fz_den = (2.**(1./3.)-1.)
fdd0 = 4./9./fz_den
dx_z = spinf(z,4./3.)
fz = (dx_z - 1.)/fz_den
z4 = z**4
fzz4 = fz*z4
ec = ec0 - mac/fdd0*(fz - fzz4) + (ec1 - ec0)*fzz4
return ec
def chi_enh_pw92(rs):
mac_pw92 = gPW92(rs,[0.016887,0.11125,10.357,3.6231,0.88026,0.49671])
return 1./(1. - rs/(pi*kf_to_rs) - 3.*(rs/kf_to_rs)**2*mac_pw92)
def chi_enh(rs,ps):
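# Spin-susceptibility enhancement chi_s/chi_s^(0), with the spin stiffness -alpha_c(rs)
# evaluated in PW92 form from the parameters ps.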
malpha_c = gPW92(rs,ps)
chi_s_chi_p = 1. - rs/(pi*kf_to_rs) - 3.*(rs/kf_to_rs)**2*malpha_c
return 1./chi_s_chi_p
def d_chi_enh(rs,ps):
malpha_c = gPW92(rs,ps)
d_malpha_c = dgPW92(rs,ps)
chi_sp = 1. - rs/(pi*kf_to_rs) - 3.*(rs/kf_to_rs)**2*malpha_c
d_chi_sp_drs = -1./(pi*kf_to_rs) - 3.*rs/kf_to_rs**2 *(2.*malpha_c \
+ rs*d_malpha_c )
return -d_chi_sp_drs/chi_sp**2
def alpha_suc_pz(rs,c,ret_coeff=False):
alpha_c = np.zeros(rs.shape)
tden = 1. + c[1] + c[2]
tden2 = tden*tden
g1 = -c[0]/tden
g2 = c[0]*(c[1]/2. + c[2])/tden2
g3 = -c[0]* ( 2.*(c[1]/2. + c[2])**2/tden + 0.25*c[1])/tden2
ca = c0_alpha
cb = -c1_alpha
cd = g1 - cb
cc =(-3*g1 + 3*g2 - g3 - 4*ca + 3*cb)/2.
ce = (g1 - g2 + g3 + 2*ca - cb)/2.
if ret_coeff:
return {'A': ca, 'B': cb, 'C': cc, 'D': cd, 'E': ce,\
'gamma': c[0], 'beta1': c[1], 'beta2': c[2]}
tmsk = rs < 1.
rsm = rs[tmsk]
lrsm = np.log(rsm)
alpha_c[tmsk] = ca*lrsm + cb + cc*rsm*lrsm + cd*rsm + ce*rsm**2*lrsm
tmsk = rs >= 1.
rsm = rs[tmsk]
alpha_c[tmsk] = -c[0]/(1. + c[1]*rsm**(0.5) + c[2]*rsm)
return alpha_c
def chi_enh_new_pz(rs,c):
ac = alpha_suc_pz(rs,c)
chi_s_chi_p = 1. - rs/(pi*kf_to_rs) + 3.*(rs/kf_to_rs)**2*ac
return 1./chi_s_chi_p
def get_dev_from_PW92(newpars,rsmin = 1.e-2, rsmax = 5.e2, Nrs = 100000):
rsl = np.exp(np.linspace(np.log(rsmin),np.log(rsmax),Nrs))
mac_pw92 = gPW92(rsl,[0.016887,0.11125,10.357,3.6231,0.88026,0.49671])
mac_new = gPW92(rsl,newpars)
pdiff = 200.*abs(mac_new - mac_pw92)/abs(mac_new + mac_pw92)
imax = np.argmax(pdiff)
return rsl[imax], -mac_pw92[imax], -mac_new[imax], pdiff[imax]
def fit_alpha_c_new():
rs_fit, echi, uchi = get_ck_chi_enh()
Nchi = rs_fit.shape[0]
#pzobj = lambda c : (chi_enh_new_pz(rs_fit,c) - echi)/uchi
#pzres = least_squares(pzobj,[3.4787, -84.4585, 4.09087])
#print(pzres.x)
# Table VI
unp_fluid = np.transpose(np.array([
[30.,40.,60.,80.,100.],
[22.6191,17.6143,12.2556,9.4259,7.6709],
[7.e-4,3.e-4,3.e-4,4.e-4,3.e-4]
]))
pol_fluid = np.transpose(np.array([
[30.,40.,60.,80.,100.],
[22.4819,17.5558,12.2418,9.4246,7.6720],
[7.e-4,7.e-4,5.e-4,3.e-4,4.e-4]
]))
unp_fluid[:,1] = -1.e-3*unp_fluid[:,1] - ke_ex(unp_fluid[:,0],0.)
unp_fluid[:,2] *= 1.e-3
pol_fluid[:,1] = -1.e-3*pol_fluid[:,1] - ke_ex(pol_fluid[:,0],1.)
pol_fluid[:,2] *= 1.e-3
NADRS = unp_fluid.shape[0]
AD_rs = np.zeros(NADRS)
AD_ac = np.zeros(NADRS)
AD_ac_ucrt = np.zeros(NADRS)
PZ_fdd0 = 4./(9.*(2.**(1./3.)-1.))
for irs in range(NADRS):
AD_rs[irs] = unp_fluid[irs,0]
AD_ac[irs] = PZ_fdd0*(pol_fluid[irs,1] - unp_fluid[irs,1])
AD_ac_ucrt[irs] = PZ_fdd0*(pol_fluid[irs,2]**2 + unp_fluid[irs,2]**2)**(0.5)
c1_pw92 = 1./(2.*abs(c0_alpha))*np.exp(-c1_alpha/(2.*abs(c0_alpha)))
ips = [0.11125, 0.88026,0.49671]
bdsl = [0.,-np.inf,0.]#[0. for i in range(len(ips))]
bdsu = [np.inf for i in range(len(ips))]
def get_PW92_pars(c):
ps = np.zeros(6)
ps[0] = abs(c0_alpha)
ps[1] = c[0]
ps[2] = c1_pw92
ps[3] = 2.*ps[0]*ps[2]**2
ps[4] = c[1]
ps[5] = c[2]
return ps
def obj(c):
res = np.zeros(Nchi + NADRS)
tps = get_PW92_pars(c)
res[:Nchi] = (chi_enh(rs_fit, tps) - echi)/uchi
i = Nchi
ac = -gPW92(AD_rs,tps)
res[Nchi:] = (ac - AD_ac)/AD_ac_ucrt
return res
res = least_squares(obj,ips,bounds = (bdsl,bdsu))
tobj = np.sum(res.fun**2)
opars = get_PW92_pars(res.x)
mac_exps = get_exp_pars(*opars)
parnms = ['A','\\alpha_1','\\beta_1','\\beta_2','\\beta_3','\\beta_4']
expnms = ['$c_0$','$c_1$','$c_2$','$c_3$','$d_0$','$d_1$']
tstr = ''
for ipar in range(len(parnms)):
tstr += '${:}$ & {:.9f} & {:} & {:.9f} \\\\ \n'.format(parnms[ipar],opars[ipar],expnms[ipar],mac_exps[ipar])
with open(bdir + 'alphac_pars_rev.tex','w+') as tfl:
tfl.write(tstr)
tstr = r' & QMC \cite{chen2019,kukkonen2021} & \multicolumn{2}{c}{PW92} & \multicolumn{2}{c}{This work} \\' + '\n'
tstr += r' $r_\mathrm{s}$ & & $\chi_s/\chi_s^{(0)}$ & PD (\%) & $\chi_s/\chi_s^{(0)}$ & PD (\%) \\ \hline' + ' \n'
for irs, rs in enumerate(rs_fit):
echi_pw92 = chi_enh_pw92(rs)
echi_new = chi_enh(rs,opars)
pd_pw92 = 200.*(echi_pw92 - echi[irs])/(echi[irs] + echi_pw92)
pd_new = 200.*(echi_new - echi[irs])/(echi[irs] + echi_new)
tprec = len(str(echi[irs]).split('.')[-1])
tstr += '{:} & {:}({:.0f}) & {:.6f} & {:.2f} & {:.6f} & {:.2f} \\\\ \n'.format(\
int(rs),echi[irs],uchi[irs]*10.**tprec,echi_pw92,pd_pw92,echi_new,pd_new)
with open(bdir + 'chi_enhance.tex','w+') as tfl:
tfl.write(tstr)
rs_min = 1.e-1
rs_max = 1.e3
Nrs = 5000
rsl_log = np.linspace(np.log(rs_min),np.log(rs_max),Nrs)
rsl = np.exp(rsl_log)
#plt.plot(rsl,gPW92(rsl,opars)-gPW92(rsl,[0.016887,0.11125,10.357,3.6231,0.88026,0.49671]))
#plt.show();exit()
fig, ax = plt.subplots(figsize=(6,4))
ax.errorbar(rs_fit,echi,yerr=uchi,color='k',\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
#plt.plot(rsl,chi_enh(rsl,c0_alpha,c1_alpha,get_gam(res2.x[0]),*res2.x))
nchi = chi_enh(rsl,opars)
ax.plot(rsl,nchi,color='darkblue',label='This work')
ax.annotate('This work',(150.,80.),color='darkblue',fontsize=14)
#plt.plot(rsl,chi_enh(rsl,c0_alpha,*res3.x))
echi_pw92 = chi_enh_pw92(rsl)
echi_pz81 = chi_enh_pz81(rsl)
ax.plot(rsl,echi_pw92,color='darkorange',linestyle='--',label='PW92')
ax.annotate('PW92',(94.,518.),color='darkorange',fontsize=14)
ax.plot(rsl,echi_pz81,color='tab:green',linestyle='-.',label='PZ81')
ax.annotate('PZ81',(4.4,114.6),color='tab:green',fontsize=14)
#"""
axins = inset_axes(ax, width=1.7, height=1.,\
loc='lower left', bbox_to_anchor=(.46,.06), \
bbox_transform=ax.transAxes)
axins.errorbar(rs_fit,echi,yerr=uchi,color='k',\
markersize=3,marker='o',linewidth=0,elinewidth=1.5)
axins.plot(rsl,nchi,color='darkblue',label='This work')
axins.plot(rsl,echi_pw92,color='darkorange',linestyle='--',label='PW92')
axins.plot(rsl,echi_pz81,color='tab:green',linestyle='-.',label='PZ81')
#ax.plot(rsl,chi_enh_new_pz(rsl,pzres.x),color='red')
#axins.plot(rsl,chi_enh_new_pz(rsl,pzres.x),color='red')
axins.set_xlim(0.5,6.)
axins.set_ylim(1.,1.8)
axins.xaxis.set_minor_locator(MultipleLocator(0.5))
axins.xaxis.set_major_locator(MultipleLocator(1.))
axins.yaxis.set_minor_locator(MultipleLocator(0.25))
axins.yaxis.set_major_locator(MultipleLocator(0.5))
#"""
ax.set_xlim(rs_min,rs_max)
ax.set_ylim(1.e-2,1.5e3)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('$r_\\mathrm{s}$ (bohr)',fontsize=14)
ax.set_ylabel(r'$\chi_s/\chi_s^{(0)}$',fontsize=14)
#ax.legend(fontsize=14)
#plt.show() ; exit()
plt.savefig(bdir + 'suscep_enhance.pdf',dpi=600, \
bbox_inches='tight')
plt.cla()
plt.clf()
plt.close()
mx_rs_dev, mx_alp_pw92, mx_alp_new, mx_pdiff = get_dev_from_PW92(opars)
print('Max percent deviation between PW92 ({:.2e}) and this work ({:.2e})'.format(mx_alp_pw92,mx_alp_new))
print(' at rs = {:.2f} ({:.6f}%)'.format(mx_rs_dev,mx_pdiff))
return
if __name__ == "__main__":
fit_alpha_c_new()

| AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/stiffness_refit.py | stiffness_refit.py |
import numpy as np
from AKCK_LFF.alda import alda,lda_derivs
from AKCK_LFF.mcp07_static import mcp07_static
pi = np.pi
gam = 1.311028777146059809410871821455657482147216796875
# NB: got this value from julia using the following script
# using SpecialFunctions
# BigFloat((gamma(0.25))^2/(32*pi)^(0.5))
cc = 4.81710873550434914847073741839267313480377197265625 # 23.0*pi/15.0
def exact_constraints(dv):
n = dv['n']
kf = dv['kF']
rs = dv['rs']
f0 = alda(dv,x_only=False,param='PW92')
"""
from Iwamato and Gross, Phys. Rev. B 35, 3003 (1987),
f(q,omega=infinity) = -4/5 n^(2/3)*d/dn[ eps_xc/n^(2/3)] + 6 n^(1/3) + d/dn[ eps_xc/n^(1/3)]
eps_xc is XC energy per electron
"""
# exchange contribution is -1/5 [3/(pi*n^2)]^(1/3)
finf_x = -1.0/(5.0)*(3.0/(pi*n**2))**(1.0/3.0)
# correlation contribution is -[22*eps_c + 26*rs*(d eps_c / d rs)]/(15*n)
eps_c,d_eps_c_d_rs = lda_derivs(dv,param='PW92')
finf_c = -(22.0*eps_c + 26.0*rs*d_eps_c_d_rs)/(15.0*n)
finf = finf_x + finf_c
bfac = (gam/cc)**(4.0/3.0)
deltaf = finf - f0
bn = bfac*deltaf**(4.0/3.0)
return bn,finf
def GKI_im_freq(u,dv):
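# GKI-style dynamic XC kernel evaluated at imaginary frequency u: interpolates between
# the zero-frequency (ALDA) and infinite-frequency limits supplied by exact_constraints().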
bn,finf = exact_constraints(dv)
y = bn**(0.5)*u
y2 = y*y
cp = (1.219946,0.973063,0.42106,1.301184,1.007578)
inum = 1. - cp[0]*y + cp[1]*y2
iden = 1. + y2*(cp[2] + y2*(cp[3] + y2*(cp[4] + y2*(cp[1]/gam)**(16./7.) )))
interp = 1./gam*inum/iden**(7./16.)
fxcu = -cc*bn**(3./4.)*interp + finf
return fxcu
def g_rMCP07(q,u,dv):
f0 = alda(dv,x_only=False,param='PW92')
fxc_q = mcp07_static(q,dv,param='PW92')
fp = {'a': 3.846991, 'b': 0.471351, 'c': 4.346063, 'd': 0.881313}
kscr = dv['kF']*(fp['a'] + fp['b']*dv['kF']**(1.5))/(1. + dv['kF']**2)
sclfun = (dv['rs']/fp['c'])**2
pscl = sclfun + (1. - sclfun)*np.exp(-fp['d']*(q/kscr)**2)
fxc_omega = GKI_im_freq(u*pscl,dv)
fxc = (1.0 + np.exp(-(q/kscr)**2)*(fxc_omega/f0 - 1.0))*fxc_q
return -q**2*fxc/(4.*pi)

| AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/rMCP07.py | rMCP07.py |
import numpy as np
pi = np.pi
# From J. P. Perdew and Alex Zunger,
# Phys. Rev. B 23, 5048, 1981
# doi: 10.1103/PhysRevB.23.5048
# for rs < 1
au = 0.0311
bu = -0.048
cu = 0.0020
du = -0.0116
# for rs > 1
gu = -0.1423
b1u = 1.0529
b2u = 0.3334
gp = -0.0843
# From J. P. Perdew and Y. Wang
# Phys. Rev. B 45, 13244 (1992).
# doi: 10.1103/PhysRevB.45.13244
A = 0.0310906908696549008630505284145328914746642112731933593
# from julia BigFloat((1-log(2))/pi^2)
alpha = 0.21370
beta1 = 7.5957
beta2 = 3.5876
beta3 = 1.6382
beta4 = 0.49294
def alda(dv,x_only=False,param='PZ81'):
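# ALDA exchange-correlation kernel f_xc = f_x + f_c of the uniform electron gas.
# dv is a dict of density variables with keys 'n', 'kF', 'rs' and 'rsh' (= sqrt(rs));
# x_only returns the exchange part alone; param selects 'PZ81' or 'PW92' correlation.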
n = dv['n']
kf = dv['kF']
rs = dv['rs']
rsh = dv['rsh']
fx = -pi/kf**2
# The uniform electron gas adiabatic correlation kernel according to
if param == 'PZ81':
# Perdew and Zunger, Phys. Rev. B 23, 5048 (1981)
if x_only:
return fx
else:
if hasattr(rs,'__len__'):
fc = np.zeros(rs.shape)
fc_lsr = -(3*au + 2*cu*rs*np.log(rs) + (2*du + cu)*rs)/(9*n)
fc_gtr = 5*b1u*rsh + (7*b1u**2 + 8*b2u)*rs + 21*b1u*b2u*rsh**3 + (4*b2u*rs)**2
fc_gtr *= gu/(36*n)/(1.0 + b1u*rsh + b2u*rs)**3
if hasattr(rs,'__len__'):
fc[rs < 1.0] = fc_lsr[rs < 1.0]
fc[rs >= 1.0] = fc_gtr[rs >= 1.0]
else:
fc = fc_gtr
if rs < 1.0:
fc = fc_lsr
elif param == 'PW92':
# J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992).
q = 2*A*(beta1*rsh + beta2*rs + beta3*rsh**3 + beta4*rs**2)
dq = A*(beta1/rsh + 2*beta2 + 3*beta3*rsh + 4*beta4*rs)
ddq = A*(-beta1/2.0/rsh**3 + 3.0/2.0*beta3/rsh + 4*beta4)
d_ec_d_rs = 2*A*( -alpha*np.log(1.0 + 1.0/q) + (1.0 + alpha*rs)*dq/(q**2 + q) )
d2_ec_d_rs2 = 2*A/(q**2 + q)*( 2*alpha*dq + (1.0 + alpha*rs)*( ddq - (2*q + 1.0)*dq**2/(q**2 + q) ) )
fc = rs/(9.0*n)*(rs*d2_ec_d_rs2 - 2*d_ec_d_rs)
return fx + fc
def lda_derivs(dv,param='PZ81'):
rs = dv['rs']
n = dv['n']
kf = dv['kF']
rsh = dv['rsh']
if param == 'PZ81':
eps_c = gu/(1.0 + b1u*rsh + b2u*rs)
eps_c_lsr = au*np.log(rs) + bu + cu*rs*np.log(rs) + du*rs
if hasattr(rs,'__len__'):
eps_c[rs < 1.0] = eps_c_lsr[rs < 1.0]
else:
if rs < 1.0:
eps_c = eps_c_lsr
d_eps_c_d_rs = -gu*(0.5*b1u/rsh + b2u)/(1.0 + b1u*rsh + b2u*rs)**2
d_ec_drs_lsr = au/rs + cu + cu*np.log(rs) + du
if hasattr(rs,'__len__'):
d_eps_c_d_rs[rs < 1.0] = d_ec_drs_lsr[rs < 1.0]
else:
if rs < 1.0:
d_eps_c_d_rs = d_ec_drs_lsr
elif param == 'PW92':
q = 2*A*(beta1*rsh + beta2*rs + beta3*rsh**3 + beta4*rs**2)
dq = A*(beta1/rsh + 2*beta2 + 3*beta3*rsh + 4*beta4*rs)
eps_c = -2*A*(1.0 + alpha*rs)*np.log(1.0 + 1.0/q)
d_eps_c_d_rs = 2*A*( -alpha*np.log(1.0 + 1.0/q) + (1.0 + alpha*rs)*dq/(q**2 + q) )
else:
raise SystemExit('Unknown LDA, ',param)
return eps_c,d_eps_c_d_rs

| AKCK-LFF | /AKCK_LFF-1.0.1-py3-none-any.whl/AKCK_LFF/alda.py | alda.py |
# AKRUP
[](https://pypi.org/project/AKRUP/)
AKRUP: Ancestral Karyotype Reconstruction Universal Pipeline
| | |
| ------- | ----------------------------------------------- |
| Author | wangjiaqi |
| Email | <[email protected]> |
| License | [BSD](http://creativecommons.org/licenses/BSD/) |
## Description
Include "bottom-up" inferences of ancestral karyotypes and "top-down" inferences of ancient chromosome evolutionary trajectories
## Installation
+ **Pypi**
~~~
pip install AKRUP
~~~
**AKRUP requires the following dependencies:**
- Python (>3.7) with the *matplotlib*, *biopython*, *click*, *numpy*, *pandas*, and *scipy* libraries.
**Note that the current version no longer supports Python 2.**
- R with the *ggridges* and *ggplot2* packages.
- Perl with the *[BioPerl](https://metacpan.org/pod/BioPerl)* package.
**Dependent third party software that has been integrated:**
| | |
| ------- | ----------------------------------------------- |
| **clustalw2** | **[Larkin et al., 2007](https://academic.oup.com/bioinformatics/article/23/21/2947/371686)** |
| **ColinearScan** | **[Wang et al., 2006](https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-7-447)** |
| **blast+** | **[Camacho et al., 2009](https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-10-421)** |
## Tips
+ Default parameters, configuration files and usage instructions for all subroutines of the AKRUP software are provided. Please consult the documentation at the [Wiki](https://github.com/Genome-structure-evolution-analysis/AKRUP/wiki).
+ Detailed user tutorials and test data are provided. Please consult the documentation on [AKRUP-example](https://github.com/Genome-structure-evolution-analysis/AKRUP-example).
+ Provides ancestral karyotypes for multiple phylogenetic nodes of monocots. Please consult the documentation on [monocots-karyotype-evolution](https://github.com/Genome-structure-evolution-analysis/monocots-karyotype-evolution).
## Citations
If you use AKRUP for your work, please cite:
| AKRUP | /AKRUP-1.0.5.tar.gz/AKRUP-1.0.5/README.md | README.md |
import requests
import os
import json
from .log import log
from .__init__ import __version__
from .__init__ import json_edit
from random import randint
import webbrowser
__main_path__ = os.path.dirname(__file__)
file_name = __file__.split('/')[-1]
class message:
def __init__(self):
self.url_message = "https://raw.githubusercontent.com/oaokm/AL-Khatma/main/DATA/message.json"
self.info = json.load(open(f'{__main_path__}/DATA/info.json', 'r+'))
def cheak_version(self, show_me_last=False):
#? Run the update check only occasionally (a 1-in-5 chance)
if randint(1, 5) == 3:
try:
JSONWEB = requests.get(url=self.url_message, timeout=5)
log(
f'{file_name} > message system | Status JSON File Cheak',
f'Read: True, url: {self.url_message}'
).write_message()
log(
f'{file_name} > message system | Status Code to request json file',
f'code: {JSONWEB.status_code}, url: {self.url_message}'
).write_message()
if JSONWEB.status_code == 200:
JSONFILE = JSONWEB.json()
if JSONFILE[0]['version'] != __version__:
json_edit(f'{__main_path__}/DATA/info.json').edit('Is_a_new', "False")
log(
f'{file_name} > message system | Found New Version',
f'We found new version({JSONFILE[0]["version"]}), Please enter "pip install AL-Khatma-lib -U" or "pip install AL-Khatma-lib --upgrade"'
).write_message()
print(JSONFILE[1][0]['Message'].format(JSONFILE[0]["version"]))
elif JSONFILE[0]['version'] == __version__ and self.info['Is_a_new'] == "False":
json_edit(f'{__main_path__}/DATA/info.json').edit('Version', __version__)
json_edit(f'{__main_path__}/DATA/info.json').edit('Is_a_new', "True")
print("\n\n\t Now opening update page on github, Please wait... \n\n")
webbrowser.open_new_tab('https://github.com/oaokm/AL-Khatma/blob/main/UPDATE.md')
else:
log(
f'{file_name} > message system | Last Version',
f'This is version ({__version__}) is last.'
).write_message()
if show_me_last: print(f'This is version ({__version__}) is last.')
else:
pass
#? If there is no internet connection, skip the check
except requests.exceptions.ConnectionError as e:
log(
f"{file_name} > message system | Check The Internet Status",
f"The WiFi connection error, please check your internet, {e}"
).write_message()
else:
pass

| AL-Khatma-lib | /AL_Khatma_lib-2.0.3-py3-none-any.whl/AL_Khatma/message.py | message.py |
from .log import log
from .message import message
from .quran import Quran
from tqdm import tqdm
from fpdf import FPDF
import os
__main_path__ = os.path.dirname(__file__)
file_name = __file__.split('/')[-1]
def Latin_to_english(text:str):
Letters ={
'ā':'a',
'ḥ':'h',
'ī':'i',
'Ā':'A',
'‘':'',
'’':'',
'Ḥ':'H',
'ṣ':'s',
'ū':'u',
'Ṭ':'T',
'ū':'u',
'ṭ':'t',
'Ṣ':'S',
'ḍ':'d',
'Ḍ':'D',
}
for i in text:
try:
text =text.replace(i, Letters[i])
except KeyError:
continue
return text
def add_to_table(From:int, To:int):
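# Build the cover-page summary table for Quran pages From..To: one row per sura with the
# first/last verse shown, its start/end page and page count, plus a final total-pages row.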
# result records (verse entries marking sura/page boundaries)
results = []
# sura names already seen, to avoid duplicates
dont_dap = []
From = From
to = To
stop = False
quran_pages = Quran().quran_blocks()
#* validate the requested page range
if From and to <= 604 and to >= From and (From and to) != 0:
for quran_cheak in quran_pages:
#* restrict processing to the requested page range
#? handle only pages whose number lies between the start page and the end page
if quran_cheak['page'] >= From and quran_cheak['page'] <= to :
#? arm the stop flag once we are inside the requested range
stop = True
#? record each sura name the first time it is seen, to avoid duplicate entries
if not quran_cheak['translation'] in dont_dap:
dont_dap.append(quran_cheak['translation'])
results.append(quran_cheak)
#? avoid duplicate page numbers by checking the last page and the last verse on a page
elif quran_cheak['verses_number'] == quran_cheak['total_verses'] or quran_cheak['page'] == to:
try:
#? detect the last verse on the final page by inspecting the page number of the following record
if quran_pages[quran_cheak['ID']]['page'] == to+1:
results.append(quran_cheak)
#? detect the last verse of the sura, up to the last page requested by the user
elif dont_dap[-1] == quran_cheak['translation'] and quran_cheak['verses_number'] == quran_cheak['total_verses']:
results.append(quran_cheak)
else:
continue
#? same checks as above, executed when an IndexError occurs (end of the Quran data)
except IndexError:
if quran_pages[quran_cheak['ID']-1]['page'] == to+1:
results.append(quran_cheak)
elif dont_dap[-1] == quran_cheak['translation'] and quran_cheak['verses_number'] == quran_cheak['total_verses']:
results.append(quran_cheak)
else:
continue
else:
#* stop once the requested range has been processed
if stop: break
else:
print("I have exceeded 114 Quranic chapters. Please adhere to the number of Quranic chapters (from 1 to 114)")
table = [["Sura name", "From", "To", "Start Page", "End Page", "Pages"]]
out = list()
for write in range(len(results)//2):
info = results[write*2:(write+1)*2]
table.append([
Latin_to_english(str(info[0]['Transliteration'])),
str(info[0]['verses_number']),
str(info[-1]['verses_number']),
str(info[0]['page']),
str(info[-1]['page']),
str((info[-1]['page']-info[0]['page'])+1)]
)
SUM=int()
for p in range(len(table[1:])):
# if table[1:][p][3] != table[1:][p-1][4] and p != 0 : SUM += int(table[1:][p][-1])
# elif p == 1 and table[1:][p][3] == table[1:][p-1][4] : SUM += int(table[1:][p][-1])
# else: continue
SUM += int(table[1:][p][-1])
table.append(['', '', '', '', '', str(SUM)])
return table
class PDF(FPDF):
def header(self):
self.image(
f"{__main_path__}/Logo/AL_Khatma_logo_one.png",
x= 79.02 ,
y= 7.38,
w= 51.97,
h= 12.92
)
self.ln(20)
def footer(self):
self.set_y(-15)
self.set_font("times", "IB", 11)
self.cell(0, 10, f"{self.page_no()}/{{nb}}", align="C")
def cover(self ,title:str, TABLE_DATA):
self.add_page()
# pdf.cell(0,20,title, ln=True)
self.ln(h=50)
self.set_font("Times","B", size=23)
self.cell(txt=title, h=-10)
self.set_font("Times", size=16)
with self.table(borders_layout="SINGLE_TOP_LINE", text_align="CENTER") as table:
for data_row in TABLE_DATA:
row = table.row()
for datum in data_row:
row.cell(datum)
class quran_pdf:
def __init__(self, block=list()):
#! Check whether a newer version of the library is available
message().cheak_version()
self.quran = Quran()
self.block = block
self.pic_path = f"{__main_path__}/Pictures"
log(
f'{file_name} > quran_pdf | A values',
f'block: {self.block}'
).write_message()
if not os.path.exists(path=f"{self.pic_path}/quran_pages"):
log(
f'{file_name} > quran_pdf | Create a folder',
f'Create a folder which will have high resolution images of the Quran'
).write_message()
os.makedirs(f"{self.pic_path}/quran_pages")
else:
pass
def download_pages(self):
#! Check whether a newer version of the library is available
message().cheak_version()
log(
f'{file_name} > download_pages | Download a pages of Quran',
f'Download a pages of Quran'
).write_message()
self.quran.page_pic([page for page in range(1, 605)], 'm-madinah_tafsir', self.pic_path, 'quran_pages')
def creating(self, path_pdf:str, From=int(), to=int(), cover=True, cover_title=str()):
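# Assemble a PDF of the Quran page images from page 'From' to page 'to' (or the range
# covered by self.block), optionally prepending a cover page with a sura summary table.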
#! Check whether a newer version of the library is available
message().cheak_version()
log(
f'{file_name} > creating | A values for creating',
f'path_pdf: {path_pdf}, From: {From}, to: {to}, cover: {cover}, cover_title: {cover_title}'
).write_message()
pdf = PDF("P", "mm", "A4")
if self.block == []:
start_page = From
end_page = to
log(
f'{file_name} > creating | A values for Start and End page',
f'Start page: {start_page}, end page: {end_page}'
).write_message()
else:
start_page = self.block[0]['page']
end_page = self.block[-1]['page']
log(
f'{file_name} > creating | A values for Start and End page',
f'Start page: {start_page}, end page: {end_page}'
).write_message()
if cover_title == '': cover_title = f'Sura Information(From: {start_page} to: {end_page} Page):'
else: pass
if cover:
pdf.cover(cover_title,
add_to_table(start_page, end_page)
)
#* get total page numbers
pdf.alias_nb_pages()
#* Page Break
pdf.set_auto_page_break(auto=True, margin=15)
if not os.path.exists(path=path_pdf): os.mkdir(path=path_pdf)
try:
PDF_name = os.path.abspath(f'{path_pdf}/Quran_from_{start_page}_to_{end_page}.pdf')
print(f"# Creating PDF file ... [{PDF_name}]")
for i in tqdm(range(start_page, end_page+1)):
pdf.add_page()
pdf.image(f'{__main_path__}/Pictures/quran_pages/{i}.png', w=169.12, h=250.46, x=20.44, y=25.10)
pdf.output(PDF_name)
except FileNotFoundError:
print("Opes! We found lost a files. now download all pages of Quran .... ")
self.download_pages()

| AL-Khatma-lib | /AL_Khatma_lib-2.0.3-py3-none-any.whl/AL_Khatma/pdf_page.py | pdf_page.py |
from .log import log
from .message import message
import json
import os
from time import perf_counter
__main_path__ = os.path.dirname(__file__)
file_name = __file__.split('/')[-1]
def show_me_files(mess=str()):
if mess != "": print(mess)
books = os.listdir(path=f'{__main_path__}/DATA/Tafser/')
print(f"The Available Tafser The Quran:")
for i in range(len(books)):
nameFile = books[i].split(".")[0]
print(f"[{i}] {nameFile}")
class tafser:
def __init__(self, tafser_book:str):
#! Check whether a newer version of the library is available
message().cheak_version()
try:
if tafser_book != '?' :
self.tafser = json.load(open(f'{__main_path__}/DATA/Tafser/{tafser_book}.json', 'r'))
log(
f'{file_name} > tafser > tafser_book | Book ',
f'The book of tafser is : {tafser_book}'
).write_message()
else:
log(
f'{file_name} > tafser > tafser_book | Request books list',
f'The user a request book list for tafser'
).write_message()
show_me_files()
except FileNotFoundError:
show_me_files(mess="[tafser]: The Books is not available.")
def call_block(self,
sura:int,
aya=list(),
report=False
):
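# Return the tafsir records for sura number 'sura'; if 'aya' is a non-empty list,
# only those verse numbers are returned. Set report=True to print a summary.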
#! Check whether a newer version of the library is available
message().cheak_version()
if sura <= 114 :
#? collect the results
results = list()
try:
start = perf_counter()
#* search the tafsir database
for search in self.tafser:
#* match the sura number in the database against the sura number given by the user
if search['Sura'] == sura and aya == []:
results.append(search)
#* same as the previous condition, except that specific verses were requested
elif search['Sura'] == sura and aya != []:
#? sort the requested verse numbers
aya.sort()
#* loop over the requested verse numbers looking for matches
for num in range(len(aya)):
if search['Sura'] == sura and aya[num] == search['verses_number']:
results.append(search)
else:
continue
else:
continue
end = perf_counter()
if report: print(f"[REPORT]\nRuning Time: {end-start}\nNumber of Results: {len(results)}\nResults: {json.dumps(results, indent=4, ensure_ascii=False)}")
return results
except AttributeError as e:
log(
f'{file_name} > tafser | Entering ',
f'False | The user entered an unavailable tafsir book'
).write_message()
show_me_files(mess=f'[{file_name}] Sorry, the tafsir book you entered is not available.')
log(
f"{file_name} > call_block | Verses Over Error",
f"I have exceeded 114 Quranic chapters. Please adhere to the number of Quranic chapters (from 1 to 114)."
).write_message()
print('I have exceeded 114 Quranic chapters. Please adhere to the number of Quranic chapters (from 1 to 114).')
return (False,)
def all_blocks(self):
#! Check whether a newer version of the library is available
message().cheak_version()
log(
f'{file_name} > all_blocks | request all blocks on Databeas',
f'The user a request all blocks on Databeas'
).write_message()
return self.tafser
def searching(self, text:str, report=False):
#! Check whether a newer version of the library is available
message().cheak_version()
if text != '':
results = list()
start = perf_counter()
for tafserSearch in self.tafser:
Tafser_text = tafserSearch['Text_without_diacritical']
search = Tafser_text.find(text)
if search != -1:
results.append(tafserSearch)
else:
continue
end = perf_counter()
if report: print(f"[REPORT]\nRuning Time: {end-start}\nResults Search: {json.dumps(results, indent=4, ensure_ascii=False)}\nThe Number Of Search Results: {len(results)}")
return results
else:
log(
f"{file_name} > Quran > searching | The Search Status",
f"The search failed; the user has not made any input in search zone or input empty."
).write_message()
print("[tafser > searching]: The search failed; the user has not made any input in search zone or input empty.") | AL-Khatma-lib | /AL_Khatma_lib-2.0.3-py3-none-any.whl/AL_Khatma/tafser.py | tafser.py |
from .log import log
from .message import message
from .tafser import tafser
import json
import requests
import os
from tqdm import tqdm
import urllib3
from time import perf_counter
__main_path__ = os.path.dirname(__file__)
file_name = __file__.split('/')[-1]
class Quran:
def __init__(self, lang='main'):
"""
Quran(class):
lang: the language edition to load ('main' holds the Arabic text with English)
"""
#! Check whether a newer version of the library is available
message().cheak_version()
try:
#* read the language file
# os.chdir(path=__main_path__)
self.quran = json.load(open(f"{__main_path__}/DATA/Language/{lang}.json", "r", encoding="utf8"))
log(
f'{file_name} > Quran | Status JSON File ',
f'Read: True, Language: {lang}'
).write_message()
except FileNotFoundError as e:
#* the user asked to list the available languages
if lang == '?':
log(
f'{file_name} > Quran | Show All Language ',
f'The user has requested to view the available languages of the Quran'
).write_message()
lang = os.listdir(path=f'{__main_path__}/DATA/Language')
print('the available languages of the Quran'.title())
for i in range(len(lang)):
nameFile = lang[i].split(".")[0]
if nameFile == 'main': print(f"[{i}] ar, en ({nameFile})")
else: print(f"[{i}] {nameFile}")
else:
log(
f'{file_name} > Quran | Status JSON File ',
f'False | The user entered an unavailable language'
).write_message()
print('[Quran] Sorry, the language you entered is not available.\nThe available languages of the Quran:')
#// NOTE: adjust this path handling so that problems do not occur
#// TODO: add a way to read all the files without needing to know their location
lang = os.listdir(path=f'{__main_path__}/DATA/Language')
for i in range(len(lang)):
nameFile = lang[i].split(".")[0]
if nameFile == 'main': print(f"[{i}] ar, en ({nameFile})")
else: print(f"[{i}] {nameFile}")
def show_block_aya(
self,
verses_no:int,
verses_number:list,
tafser_aya=False,
tafser_type=str(),
orderly=False,
):
"""
show_block_aya(func): extracts the verses requested by the user from the language file
verses_no(int): the sura number to extract
verses_number(list): specific verse numbers to return from that sura; may be left empty
tafser_aya(bool): if True, also fetch the tafsir (from the 'muyassar' book) for the requested verses
orderly(bool): if True, print the result in a readable (pretty-printed) form
"""
#! Check whether a newer version of the library is available
message().cheak_version()
log(
f"{file_name} > Quran > show_block_aya | info",
f"verses_no: {verses_no}, verses_number: {verses_number}, orderly: {orderly}"
).write_message()
if verses_no <= 114:
log(
f"{file_name} > Quran > show_block_aya | number of Surahs of the Qur’an is good",
f"The number entered did not exceed the number of Surahs of the Qur’an"
).write_message()
#? collect all verses of the requested sura here
results = list()
#? collect the specifically requested verses here
verses = list()
#* search for the requested sura
for search in range(len(self.quran)):
#* check that this record belongs to the requested sura (by its order number)
if self.quran[search]['verses_no'] == verses_no:
#* record every verse of the sura in one list and, if requested, the specific verses in a separate list
for i in range(self.quran[search]['total_verses']):
results.append(self.quran[search+i])
#? the specifically requested verses are collected here while the whole sura is being collected
for cheaking in verses_number:
#? check that the requested verse lies within the sura and matches this record before adding it to the dedicated list
if self.quran[search+i]['total_verses'] >= cheaking and cheaking == self.quran[search+i]['verses_number']:
verses.append(self.quran[search+i])
else:
continue
#? tafsir results for the verses, selected by sura number or by sura number plus the verses in "verses_number"
results_tafser = list()
if tafser_aya:
taf = tafser(tafser_book='muyassar')
results_tafser = taf.call_block(verses_no, verses_number)
#* print a formatted report and return the three values produced above
if orderly:
txt = ('results'.upper(), 'verses'.upper())
print(f'\n\n\n{txt[0]:.^50}\n\n\n')
print(json.dumps(results, indent=4, ensure_ascii=False))
print(f'\n\n\n{txt[1]:.^50}\n\n\n')
print(json.dumps(verses, indent=4, ensure_ascii=False))
return (results, verses, results_tafser)
#* return the three values
else: return (results, verses, results_tafser)
#? sura not matched yet; keep scanning until the requested sura is found
else: continue
#? reject the request if it exceeds the 114 Quranic suras
else:
log(
f"{file_name} > Quran > show_block_aya | Verses Over Error",
f"I have exceeded 114 Quranic chapters. Please adhere to the number of Quranic chapters (from 1 to 114)."
).write_message()
print('I have exceeded 114 Quranic chapters. Please adhere to the number of Quranic chapters (from 1 to 114).')
return (False,)
#* search for a word or group of words in the Quran
def searching(
self,
text:str,
search_second_lang=False,
print_report=False
):
"""
searching(func): searches through the Quranic text
text(str): the word or phrase to search for
search_second_lang(bool): if True, search the second language in the language file instead of the Arabic text
print_report(bool): if True, print a formatted report of the search results
"""
#! Check whether a newer version of the library is available
message().cheak_version()
log(
f"{file_name} > Quran > searching | Search Info",
f"Search: {text}, search_my_lang: {search_second_lang}, print_report: {print_report}"
).write_message()
start = perf_counter()
#? do not run the search if the text is empty
if text != '':
log(
f"{file_name} > Quran > searching | The Search Status",
f"True"
).write_message()
end = perf_counter()
# * convert the whole search text to lower case
text = text.lower()
#* store the results
results = list()
#? when search_second_lang is enabled, detect which second-language field the language file provides
lang = ['English', 'Bengali', 'Chinese', 'Spanish', 'French', 'Indonesian', 'Russian', 'Swedish', 'Turkish', 'Urdu']
if search_second_lang:
try:
#* find the matching language field in the language file
for LANG in lang:
lang_ues = f'verses_text_{LANG.lower()}'
verses_text = self.quran[0][lang_ues]
break
except KeyError:
lang_ues = 'verses_text_without_diacritical'
#* the search starts here
for i in range(len(self.quran)):
#? used when the search_second_lang option is enabled
if search_second_lang:
verses_text = self.quran[i][lang_ues].lower()
search = verses_text.find(text)
#? executed when search_second_lang is not enabled
else:
verses_text = self.quran[i]['verses_text_without_diacritical']
search = verses_text.find(text)
#* check the search result
#? -1 means the text was not found in this verse
if search != -1:
results.append(self.quran[i])
else:
continue
#* return the search results
end = perf_counter()
if print_report: print(f"[REPORT]\nRuning Time: {end-start}\nCount Search: {len(results)}\nResult Search: {json.dumps(results, indent=4, ensure_ascii=False)}")
return results
else:
log(
f"{file_name} > Quran > searching | The Search Status",
f"The search failed; the user has not made any input in search zone or input empty."
).write_message()
end = perf_counter()
if print_report: print(f"[REPORT]\nRuning Time: {end-start}\nResults Search: {json.dumps(results, indent=4, ensure_ascii=False)}\nThe Number Of Search Results: {len(results)}")
return (False, )
#? return the value of self.quran
def quran_blocks(self):
#! Check whether a newer version of the library is available
message().cheak_version()
log(
f'{file_name} > quran_blocks | Return All The Quran Data ',
f'The user return all data quran from json file'
).write_message()
return self.quran
#* download the pages of the Holy Quran
def page_pic(
self,
page:list,
type:str,
path:str,
name_folder:str,
return_imge=False):
"""
page_pic(func): downloads pages of the Quran from the internet
page(list): the page numbers to download
type(str): which scanned edition to use (a key in quran_books.json)
path(str): directory in which to save the images
name_folder(str): name of the folder that will hold the page images
return_imge(bool): if True, return the pages as raw bytes instead of writing files
"""
#! Check whether a newer version of the library is available
message().cheak_version()
#? check whether an internet connection is available
try:
requests.get('https://github.com/oaokm')
log(
f"{file_name} > Quran > page_pic | Check The Internet Status",
f"The internet is good"
).write_message()
#* read the file that holds the URLs of the complete, high-resolution Quran page scans
quran_books = json.load(open(f'{__main_path__}/DATA/quran_books.json', 'r', encoding='utf8'))
try:
log(
f"{file_name} > Quran > page_pic | Check For Read JSON File",
f"The file (quran_books.json) is good"
).write_message()
web_pic = quran_books[type]
except KeyError as e:
log(
f"{file_name} > Quran > page_pic | Check For Read JSON File",
f"Error(keyError): {e}"
).write_message()
print(f'[ Quran > page_pic | KeyError ]: {e}')
#* when return_imge is enabled, the image content is collected as bytes (binary) instead of being written to disk
pics = list()
#? the page download starts here
PATH = f'{path}/{name_folder}'
print(f"# Download Pages From Quran | Type: {type} | Path:[{os.path.abspath(PATH)}] | From: {page[0]} to {page[-1]}")
for p in tqdm(page):
#? check that the requested page does not exceed the number of Quran pages
if p <= 604:
#* connect to the site and fetch the page image
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
url = web_pic.format(p)
r_page = requests.get(url=url, verify=False)
#* prepare to store the downloaded page
#? if return_imge is enabled, keep the image as bytes (binary) instead of writing a file
if return_imge:
pics.append([p, r_page.content])
else:
#! if the folder given by the user does not exist on disk, create it before downloading
if not os.path.exists(PATH):
os.makedirs(PATH)
#* save the images
with open(f"{PATH}/{p}.png", 'wb') as f:
f.write(r_page.content)
f.close()
else:
print('I have exceeded 604 Page. Please adhere to the number of Page (from 1 to 604).')
#? if return_imge is enabled, return the collected images
if return_imge:
return pics
#? if there is no internet connection, the request is rejected
except requests.exceptions.ConnectionError as e:
log(
f"{file_name} > Quran > page_pic | Check The Internet Status",
f"The WiFi connection error, please check your internet"
).write_message()
print(f"[ Quran | page_pic > The WiFi connection error ] {e}")
if __name__ == '__main__':
#! Check whether a newer version of the library is available
message().cheak_version()
print('[quran.py] This file is not meant to be run directly')

| AL-Khatma-lib | /AL_Khatma_lib-2.0.3-py3-none-any.whl/AL_Khatma/quran.py | quran.py |
from .quran import Quran
from .message import message
from .log import log
from .pdf_page import quran_pdf
from tqdm import tqdm
from time import perf_counter
import os
__main_path__ = os.path.dirname(__file__)
file_name = __file__.split('/')[-1]
class khatma:
def __init__(self, days:int):
log(
f"{file_name} > khatma | Status The Class",
f"True, days(int): {days}"
).write_message()
#! Check whether a newer version of the library is available
message().cheak_version()
self.days = days
self.werrd_page = list()
self.quran = Quran().quran_blocks()
#* Splits the pages of the Quran so the whole Quran can be read within the chosen number of days
def Khatma_page(self, report=False, werrd=False, pdf=False, down_path_pdf=str()):
"""
Khatma_page(func): splits the pages of the Quran into daily portions so that a full reading can be completed
report(bool): if True, print a report of the results computed by this function
werrd(bool): if True, split the Quran verses over the number of days entered by the user; if False, only the list of daily page counts needed to finish the Quran is returned
pdf(bool): if True, apply the split and create PDF files of the Quran
down_path_pdf(str): download path for the PDF files
"""
#! Check whether a newer version of the library is available
message().cheak_version()
log(
f"{file_name} > khatma > Khatma_page | Info",
f"Days:{self.days}, report: {report}, werrd: {werrd}"
).write_message()
#* number of pages of the Holy Quran in the King Fahd Complex (Madinah Mushaf) edition
pages = 604
#* the number of days in which we want to complete the Quran
Fdays = self.days
Fdays_pages = list()
#* use a greedy algorithm to split the Quran pages into daily portions
deno = [int(pages/Fdays), 2, 1]
for day in deno:
while day <= pages:
Fdays_pages.append(day)
pages -= day
else:
continue
#* check whether Fdays_pages has more entries than the requested number of days
if len(Fdays_pages) > self.days:
#? rebalance the list so its length matches the number of days when there are too many entries
rest = Fdays_pages[-(len(Fdays_pages) - Fdays):]
Fdays_pages_N = Fdays_pages[:-(len(Fdays_pages) - Fdays)]
#? the rebalancing happens here so the list becomes equal in length to the entered number of days
for i in range(len(rest)):
Fdays_pages_N[i] += rest[i]
report_pages_N = f"[REPORT KHATMA]\nDays: {self.days}\nReading Rate: {int(sum(Fdays_pages_N)/len(Fdays_pages_N))} (Page/Day)\nWeerd: {Fdays_pages_N}"
#? verify that the divided pages sum to the total number of Quran pages
if sum(Fdays_pages_N) == 604:
self.werrd_page = Fdays_pages_N
#* executed when the user did not request the verse-level split
if werrd or pdf == False:
if report: print(report_pages_N)
return Fdays_pages_N
#! executed if something goes wrong in the rebalancing step (should not normally happen)
else:
log(
f"{file_name} > khatma > Khatma_page | Partition Error ",
f"An error has occurred that is not supposed to happen. Value: len(Fdays_pages) > self.days: {len(Fdays_pages) > self.days}, len(Fdays_pages):{len(Fdays_pages)}, Fdays_pages: {Fdays_pages}, days: {self.days}"
).write_message()
print("[ khatma > Khatma_page | Partition Error ] An error has occurred that is not supposed to happen, Visit the library's Issues page: https://github.com/oaokm/AL-Khatma/issues")
#* executed directly when the split already matches the number of days
else:
report = f"[REPORT KHATMA]\nDays: {self.days}\nReading Rate: {int(sum(Fdays_pages)/len(Fdays_pages))} (Page/Day)\nWeerd: {Fdays_pages}"
#? verify that the divided pages sum to the total number of Quran pages
if sum(Fdays_pages) == 604:
self.werrd_page = Fdays_pages
if werrd or pdf == False:
if report: print(report_str)
return Fdays_pages
else:
if werrd or pdf == False:
if report: print(report_str)
return Fdays_pages
#* if this option is enabled, split the Quran verses over the entered number of days
if werrd:
"""
[Variable definitions]
0. page_per_day(list): one element per requested day; each element holds the verse records to be read that day
1. for_day(list): verse records are accumulated here before being appended to page_per_day
2. stop_id(int): helper for the splitting; records the ID of the last verse handled so the scan can resume from it
"""
page_per_day = list()
for_day = list()
stop_id = int() # int() = 0
#* the splitting process starts here
print("The process of dividing the Quranic verses begins ... ")
for i in tqdm(range(1, len(self.werrd_page)+1)):
#? iterate over self.quran starting from the last verse ID we stopped at
for quran in self.quran[stop_id:]:
for_day.append(quran)
#? once this verse's page exceeds the cumulative page count for the current day, close the day
if quran['page'] > sum(self.werrd_page[:i]):
#* record the last verse ID
stop_id = quran['ID']
#* append the day's verses to page_per_day, reset for_day and move to the next day
page_per_day.append(for_day)
for_day = []
break
else:
continue
#* record the final portion (the last day)
if len(page_per_day) == len(self.werrd_page)-1:
page_per_day.append(self.quran[stop_id:])
else:
continue
report = f"[REPORT KHATMA]\nDays: {self.days}\nReading Rate: {int(sum(self.werrd_page)/len(self.werrd_page))} (Page/Day)\nWeerd: {self.werrd_page}\nNumber of Werrd: {len(page_per_day)}"
if report: print(report)
log(
f"{file_name} > khatma > Khatma_page > (werrd) | Status",
f"True"
).write_message()
#* return the result
return page_per_day
#* apply the split directly to PDF output
elif pdf and down_path_pdf != '':
last_value = 1
pdf = quran_pdf()
start = perf_counter()
for day in range(len(self.werrd_page)):
print(f"# {day+1} of {self.days}\tFrom {last_value} -> {self.werrd_page[day]+last_value-1} Page")
pdf.creating('./weerds',
From=last_value, to=self.werrd_page[day]+last_value-1,
cover=True,
cover_title=f'Number of Weerd: {day+1} of {self.days} Days')
last_value += self.werrd_page[day]
end = perf_counter()
report = f"\n[REPORT KHATMA - PDF]\nRuning Time: {end-start}\nDays: {self.days}\nReading Rate: {int(sum(self.werrd_page)/len(self.werrd_page))} (Page/Day)\nWeerd: {self.werrd_page}\nNumber of Werrd: {len(self.werrd_page)}\nPath: {os.path.abspath(down_path_pdf)}"
if report: print(report)
elif down_path_pdf == '':print("[Khatma | PDF] The option (down_path_pdf) is empty. Please provide a download path, like this:\n\n\tkhatma(30).Khatma_page(pdf=True, down_path_pdf='./weerds', report=True)\n\n")
else: pass

| AL-Khatma-lib | /AL_Khatma_lib-2.0.3-py3-none-any.whl/AL_Khatma/khatma.py | khatma.py |
import requests
import os
import json
from .log import log
from tqdm import tqdm
import platform
main_path = os.path.dirname(__file__)
system_path = {
"Linux": "/",
"Windows": "\\",
"Darwin": "/"
}
class cheak:
def __init__(self):
self.download_file = list()
self.path_url = list()
self.system_path = system_path
try:
self.system_path[platform.system()]
except KeyError:
log(
f"{__file__} > cheak > | Error: Key Error",
f"Switch path system to '/' , system: {platform.system()}, uname:{platform.uname()}"
).write_message()
self.system_path = '/'
try:
self.JSONFILE = requests.get(
url="https://raw.githubusercontent.com/oaokm/AL-Khatma/main/DATA/cheak_download.json").json()
except requests.exceptions.ConnectionError as e:
log(
f"{__file__} > cheak > __init__ | Check The Internet Status",
f"The WiFi connection error, please check your internet"
).write_message()
print(f"[ cheak | __init__ > The WiFi connection error ] {e}")
def find_DATA_folder(self, showme_log=False):
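# Compare the local DATA folder against the manifest downloaded from GitHub and
# record any missing files so they can be fetched later by download().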
for block in self.JSONFILE:
path = f"{main_path}{self.system_path[platform.system()]}{block['name_folder']}"
for ch in block['files']:
cheak_file = os.path.exists(path=f"{path}{self.system_path[platform.system()]}{ch}")
if showme_log: print(f'[{path}{self.system_path[platform.system()]}{ch}] {cheak_file}')
if not cheak_file: self.download_file.append(f'{path}{self.system_path[platform.system()]}{ch}')
else:
continue
for i in range(len(self.download_file)):
text = self.download_file[i].split(self.system_path[platform.system()])
for y in range(len(text)):
if text[-y-1] == "AL_Khatma":
self.path_url.append("/".join(text[-y:]))
break
else: continue
return self.download_file
def download(self):
if self.path_url != []:
print(f"# Download DATA Folder in {os.path.dirname(self.download_file[0])}")
for download in tqdm(range(len(self.path_url))):
if not os.path.exists(os.path.dirname(self.download_file[download])):
os.makedirs(os.path.dirname(self.download_file[download]))
github_file = requests.get(
url=f"https://raw.githubusercontent.com/oaokm/AL-Khatma/main/{self.path_url[download]}"
)
if github_file.status_code == 200:
with open(self.download_file[download], 'wb') as f:
f.write(github_file.content)
f.close()
else:
print(f"[INFO]\nURL: {github_file.url}\tStatus Code: {github_file.status_code}")
else:
print("[cheak.py] Status folder is very good")
if __name__ == '__main__':
c = cheak()
c.find_DATA_folder()
c.download()

| AL-Khatma-lib | /AL_Khatma_lib-2.0.3-py3-none-any.whl/AL_Khatma/cheak.py | cheak.py |
import os
from .log import log
from .cheak import cheak
import json
import platform
#! Do not change the version
__version__ = '2.0.3'
__main_path__ = os.path.dirname(__file__)
class json_edit:
def __init__(self, name_file:str, refresh=False):
self.name_file = name_file
self.refresh = refresh
def info_file(self):
if not os.path.exists(path=f"{__main_path__}/DATA/info.json") or self.refresh:
INFO = {
"API_name": "AL-Khatma",
"Version": __version__,
"Is_a_new": "True",
"License": "MIT",
"Progjet_page": "https://github.com/oaokm/AL-Khatma",
"Author": "Osamah Awadh",
"system_tpye": platform.system(),
"version_system": platform.version(),
"platform": platform.platform(),
"refresh": "True"
}
with open(f"{__main_path__}/DATA/info.json", 'w') as info :
info.write(json.dumps(INFO, indent=4, ensure_ascii=False))
else:
pass
def edit(self, arge:str ,value):
with open(self.name_file, 'r+') as f:
data = json.load(f)
data[arge] = value
f.seek(0)
json.dump(data, f, indent=4)
f.truncate()
def Download_DATA():
log(
f'{__file__} | Download DATA Request ',
f'The user to been request the cheak and download files form Github'
).write_message()
json_edit(f'{__main_path__}/DATA/info.json', refresh=True).info_file()
down = cheak()
list_files = down.find_DATA_folder()
if list_files != []:
down.download()
else:
print("[Download_DATA | Status Scan] All files are uploaded")
def where_me():
log(
f'{__file__} | Where Me Request ',
f'The user to been request the path program'
).write_message()
return __main_path__
def show_me_log():
log(
f'{__file__} | Show Me Log Request ',
f'The user to been request the path log file'
).write_message()
return f"{__main_path__}/DATA/loging.log" | AL-Khatma-lib | /AL_Khatma_lib-2.0.3-py3-none-any.whl/AL_Khatma/__init__.py | __init__.py |
import os as _os
import shlex as _shlex
import contextlib as _contextlib
import sys as _sys
import operator as _operator
import itertools as _itertools
import warnings as _warnings
import pkg_resources
import setuptools.command.test as orig
from setuptools import Distribution
@_contextlib.contextmanager
def _save_argv(repl=None):
saved = _sys.argv[:]
if repl is not None:
_sys.argv[:] = repl
try:
yield saved
finally:
_sys.argv[:] = saved
class CustomizedDist(Distribution):
allow_hosts = None
index_url = None
def fetch_build_egg(self, req):
"""Specialized version of Distribution.fetch_build_egg
that respects allow_hosts and index_url."""
from setuptools.command.easy_install import easy_install
dist = Distribution({'script_args': ['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links',
'site_dirs',
'index_url',
'optimize',
'site_dirs',
'allow_hosts',
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
if self.allow_hosts:
opts['allow_hosts'] = ('test', self.allow_hosts)
if self.index_url:
opts['index_url'] = ('test', self.index_url)
install_dir_func = getattr(self, 'get_egg_cache_dir', _os.getcwd)
install_dir = install_dir_func()
cmd = easy_install(
dist,
args=["x"],
install_dir=install_dir,
exclude_scripts=True,
always_copy=False,
build_directory=None,
editable=False,
upgrade=False,
multi_version=True,
no_report=True,
user=False,
)
cmd.ensure_finalized()
return cmd.easy_install(req)
class PyTest(orig.test):
"""
>>> import setuptools
>>> dist = setuptools.Distribution()
>>> cmd = PyTest(dist)
"""
user_options = [
('extras', None, "Install (all) setuptools extras when running tests"),
(
'index-url=',
None,
"Specify an index url from which to retrieve dependencies",
),
(
'allow-hosts=',
None,
"Whitelist of comma-separated hosts to allow "
"when retrieving dependencies",
),
(
'addopts=',
None,
"Additional options to be passed verbatim to the pytest runner",
),
]
def initialize_options(self):
self.extras = False
self.index_url = None
self.allow_hosts = None
self.addopts = []
self.ensure_setuptools_version()
@staticmethod
def ensure_setuptools_version():
"""
Due to the fact that pytest-runner is often required (via
setup-requires directive) by toolchains that never invoke
it (i.e. they're only installing the package, not testing it),
instead of declaring the dependency in the package
metadata, assert the requirement at run time.
"""
pkg_resources.require('setuptools>=27.3')
def finalize_options(self):
if self.addopts:
self.addopts = _shlex.split(self.addopts)
@staticmethod
def marker_passes(marker):
"""
Given an environment marker, return True if the marker is valid
and matches this environment.
"""
return (
not marker
or not pkg_resources.invalid_marker(marker)
and pkg_resources.evaluate_marker(marker)
)
def install_dists(self, dist):
"""
Extend install_dists to include extras support
"""
return _itertools.chain(
orig.test.install_dists(dist), self.install_extra_dists(dist)
)
def install_extra_dists(self, dist):
"""
Install extras that are indicated by markers or
install all extras if '--extras' is indicated.
"""
extras_require = dist.extras_require or {}
spec_extras = (
(spec.partition(':'), reqs) for spec, reqs in extras_require.items()
)
matching_extras = (
reqs
for (name, sep, marker), reqs in spec_extras
# include unnamed extras or all if self.extras indicated
if (not name or self.extras)
# never include extras that fail to pass marker eval
and self.marker_passes(marker)
)
results = list(map(dist.fetch_build_eggs, matching_extras))
return _itertools.chain.from_iterable(results)
@staticmethod
def _warn_old_setuptools():
msg = (
"pytest-runner will stop working on this version of setuptools; "
"please upgrade to setuptools 30.4 or later or pin to "
"pytest-runner < 5."
)
ver_str = pkg_resources.get_distribution('setuptools').version
ver = pkg_resources.parse_version(ver_str)
if ver < pkg_resources.parse_version('30.4'):
_warnings.warn(msg)
def run(self):
"""
Override run to ensure requirements are available in this session (but
don't install them anywhere).
"""
self._warn_old_setuptools()
dist = CustomizedDist()
for attr in 'allow_hosts index_url'.split():
setattr(dist, attr, getattr(self, attr))
for attr in (
'dependency_links install_requires tests_require extras_require '
).split():
setattr(dist, attr, getattr(self.distribution, attr))
installed_dists = self.install_dists(dist)
if self.dry_run:
self.announce('skipping tests (dry run)')
return
paths = map(_operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
return self.run_tests()
@property
def _argv(self):
return ['pytest'] + self.addopts
def run_tests(self):
"""
Invoke pytest, replacing argv. Return result code.
"""
with _save_argv(_sys.argv[:1] + self.addopts):
result_code = __import__('pytest').main()
if result_code:
raise SystemExit(result_code)

| ALClassifier4SS | /ALClassifier4SS-0.0.1.tar.gz/ALClassifier4SS-0.0.1/.eggs/pytest_runner-6.0.0-py3.8.egg/ptr/__init__.py | __init__.py |
# ALMAFE-ConfigDelivery package
Classes for retrieval and transformation of ALMA Front End subassembly configuration data
## ConfigDelivery.CCA6
### CCA6Database
Access/update the ALMA band 6 test data database
### DataDelivery, DeliveryConfig, ConfigFiles
Collect CCA6 measurement results and operating parameters in the required format for delivery to ALMA.
## ConfigDelivery.FEMC
Class for creating the required INI files for the FEMC module from the XML data delivery packages and/or TMCDB XML files.
Work in progress.
## ConfigDelivery.Plot.WCA
Produce plots required for WCA config delivery
## ConfigDelivery.WCA
### WCADatabase
Access/update the WCA tables in the FETMS database and produce WCA config delivery files
### WCADataFiles
Access the WCA test data files storage area under \\CVFiler\LO
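A minimal usage sketch (the import path and the band/serial numbers are assumptions, not confirmed by this package's documentation):

```python
# Hypothetical example: the module path, band and serial number are assumptions.
from ALMAFE.ConfigDelivery.WCA.WCADataFiles import WCADataFiles

wca = WCADataFiles()
# read the 'WCAs.CSV' metadata for one unit:
for rec in wca.readWCACSV(band=6, serialNum='23', showProgress=True):
    print(rec['serialNum'], rec['ESN'], rec['fLoYig'], rec['fHiYig'])
```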
### WCAOutputPower
Access/update the WCA output power tables in the FETMS database
## FETMSDatabase
### TestDataHeader
Access/update test data header records in the FETMS database.
| ALMAFE-ConfigDelivery | /ALMAFE-ConfigDelivery-0.0.2.tar.gz/ALMAFE-ConfigDelivery-0.0.2/README.md | README.md |
from ALMAFE.basic.ParseTimeStamp import makeTimeStamp
import os.path
import csv
import glob
import re
import tempfile
import zipfile
class WCADataFiles(object):
'''
Class for accessing WCA raw measurment data on CVFiler
'''
DATA_FILES_ROOT = r"\\cvfiler\LO\ALMA Documentation NEW"
def __init__(self):
'''
Constructor
'''
def openWCADataPackage(self, band, serialNum, showProgress = False):
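# Locate the 'WCAs.CSV' metadata for the given band and serial number under the CVFiler
# test data tree, unzipping the newest RMA (or Production) archive if the CSV is not
# found directly. Returns the parsed metadata dict, or None if nothing was found.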
# metadata for found file will go here:
item = None
# make the WCA's root path:
path = r"{:s}\Band_{:d}\WCA\SN{:03d}\Test Data".format(self.DATA_FILES_ROOT, band, serialNum)
if os.path.exists(path):
# look here for the 'WCAs.CSV' file:
item = self.findCSV(path, showProgress = showProgress)
# if item is True but not a dict, we will skip looking in zip files.
if not item:
# not found. look for ZIP files...
pattern = os.path.join(path, "*.zip")
files = glob.glob(pattern)
if files:
toOpen = None
maxRMA = 0
if len(files) == 1:
# just one found:
path = files[0]
toOpen = path
else:
# loop to find the highest RMA zip file:
for path in files:
if showProgress:
print(path)
file = os.path.basename(path)
# find RMA<int> in name, with or without space between:
RMA = re.findall(r"RMA\s*\d+", file)
if RMA:
# make int removing 'RMA' and stripping any spaces:
RMA = int(RMA[0][3:].strip())
if RMA > maxRMA:
# newer than previously seen
maxRMA = RMA
toOpen = path
elif not toOpen:
# it might be something like "Production Test Data.." in which case we'll use it if no 'RMA':
Production = re.findall("Production", file)
if Production:
toOpen = path
if toOpen:
if showProgress:
print("opening {0}".format(toOpen))
# make a local temp directory to unzip into:
unzipTo = tempfile.TemporaryDirectory(prefix="WCADB")
with zipfile.ZipFile(toOpen, 'r') as zip_ref:
zip_ref.extractall(unzipTo.name)
# search for the 'WCAs.CSV' file there:
item = self.findCSV(unzipTo.name, showProgress = showProgress)
if not item:
# not found; look for subdirectories:
pattern = os.path.join(unzipTo.name, "*Data*")
files = glob.glob(pattern)
if files:
# if found, search the first subdirectory:
item = self.findCSV(files[0], showProgress = showProgress)
# save the reference to the temp directory so it doesn't get deleted yet:
if item:
item['unzipTo'] = unzipTo
# if found and it is a dict:
if (item):
# don't trust the file's serialNum over our own:
item['serialNum'] = serialNum
return item
def readWCACSV(self, band = None, serialNum = None, showProgress = False):
'''
Read one or more 'WCAs.CSV' metadata files from CVFiler
:param band:int if provided, filter by band (1..10)
:param serialNum:str if provided find the record for a specific unit. Only has effect if band is provided.
:param showProgress: if True, print file names loaded
:return list of dict{band, serialNum, timeStamp, ESN, fLoYig, fHiYig, VG0, VG1}
'''
if not band:
# search all bands except band 2:
bands = [1, 3, 4, 5, 6, 7, 8, 9, 10]
else:
# search the specified band:
bands = [band]
if not serialNum:
# search all serialNums:
serialNums = [n for n in range(1, 99)]
else:
# search the specified serialNum:
serialNums = [int(serialNum)]
output = []
# loop over the specified band+serialNum combinations:
for band in bands:
for serialNum in serialNums:
# ignore pre-production band 5 units:
if band == 5 and serialNum >= 10:
# metadata for found file will go here:
item = self.openWCADataPackage(band, serialNum, showProgress = showProgress)
# if found and it is a dict:
if (item and not item is True):
output.append(item)
return output
def findCSV(self, path, showProgress = False):
'''
Search a directory for the 'WCAs.CSV' metadata file from CVFiler
If found, read it and return its contents.
:param path: directory to search
:param showProgress: if true and found, print the found path/filename
:return None if not found
dict{path, band, serialNum, timeStamp, ESN, fLoYig, fHiYig, VG0, VG1} if found and parsed
'''
pattern = os.path.join(path, "*WCAs*.csv")
files = glob.glob(pattern)
if files:
path = files[0]
found = self.readOneCSV(path)
if showProgress and found:
print("found {}".format(os.path.basename(path)))
return found
return None
def readOneCSV(self, path):
'''
Read a single 'WCAs.CSV' metadata file from CVFiler
:param path: the file to read
:return dict{path, band, serialNum, timeStamp, ESN, fLoYig, fHiYig} or None
may have items VG0, VG1 if found
'''
output = None
with open(path, 'r', newline='') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
if not output:
try:
output = {'path' : path,
'band' : row[0],
'serialNum' : row[1],
'timeStamp' : makeTimeStamp(row[2]),
'ESN' : row[5],
'fLoYig' : row[6],
'fHiYig' : row[7]
}
except:
pass
try:
if output:
output['VG0'] = row[13]
output['VG1'] = row[14]
except:
if output:
output['VG0'] = None
output['VG1'] = None
pass
return output
def readOutputPower(self, band, serialNum, showProgress = False):
item = self.openWCADataPackage(band, serialNum, showProgress)
if item:
path = os.path.dirname(item['path'])
pattern = os.path.join(path, "*WCA_OUTPUT_POWER*.csv")
files = glob.glob(pattern)
if files:
if showProgress:
print("found {}".format(os.path.basename(files[0])))
return (files[0], self.readOneOutputPower(files[0], item['VG0'], item['VG1']))
else:
return (None, None)
def readOneOutputPower(self, path, VG0 = None, VG1 = None):
output = []
with open(path, 'r', newline='') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
if len(row) == 1:
if not VG0:
# ! Pol0 : VG (V) set: -0.450
found = re.search(r"Pol0.*VG.*[+=]?(\d+(\.\d*)?|\.\d+)", row[0])
if found:
VG0 = float(found[0][-7:].strip())
if not VG1:
# ! Pol1 : VG (V) set: -0.450
found = re.search(r"Pol1.*VG.*[+=]?(\d+(\.\d*)?|\.\d+)", row[0])
if found:
VG1 = float(found[0][-7:].strip())
try:
# ! keyBand keyDataSet fkWCA TS FreqLO Power PolOutput VD0 VD1 VG0 VG1
if len(row) >= 10:
try:
if not VG0:
VG0 = float(row[9])
if not VG1:
VG1 = float(row[10])
except:
pass
output.append({'band' : int(row[0]),
'dataSet' : int(row[1]),
'serialNum' : int(row[2]),
'timeStamp' : makeTimeStamp(row[3]),
'freqLO' : float(row[4]),
'power' : float(row[5]),
'pol' : float(row[6]),
'VD0' : float(row[7]),
'VD1' : float(row[8]),
'VG0' : VG0,
'VG1' : VG1
})
except:
pass
return output | ALMAFE-ConfigDelivery | /ALMAFE-ConfigDelivery-0.0.2.tar.gz/ALMAFE-ConfigDelivery-0.0.2/ConfigDelivery/WCA/WCADataFiles.py | WCADataFiles.py |
from ConfigDelivery.Plot.WCA import PlotWCA
from ConfigDelivery.WCA.LoadConfiguration import loadConfiguration
from ConfigDelivery.WCA.WCAOutputPower import WCAOutputPower
from ALMAFE.database.DriverMySQL import DriverMySQL as driver
from ALMAFE.basic.ParseTimeStamp import makeTimeStamp
from xml.etree import ElementTree as ET
import os.path
import pandas as pd
class WCADatabase():
'''
Wrapper for FETMS database WCA-related tables
'''
# define maximum safe LO power allowed per band. 0=no restriction:
BAND_MAX_SAFE_POWER_MW = {
1 : 0,
2 : 0,
3 : 0,
4 : 40,
5 : 40,
6 : 53,
7 : 53,
8 : 134,
9 : 168,
10 : 168
}
# define warm LO multiplication factor per band:
BAND_WARM_MULT = {
1 : 1,
2 : 6, # this is subject to change for the ESO band 2
3 : 6,
4 : 3,
5 : 6,
6 : 6,
7 : 6,
8 : 3,
9 : 3,
10 : 6
}
# define lowest LO freq per band:
BAND_LO_LOWEST = {
1 : 31.0,
2 : 79.0,
3 : 92.0,
4 : 133.0,
5 : 167.0,
6 : 221.0,
7 : 283.0,
8 : 393.0,
9 : 614.0,
10 : 799.0
}
def __init__(self):
'''
Constructor
'''
connectionInfo = loadConfiguration()
self.DB = driver(connectionInfo)
self.outputPower = WCAOutputPower()
self.plotWCA = None
def getWCAConfig(self, band = None, serialNum = None):
'''
Load the most recent configuration record(s) for one or more WCAs
:param band:int if provided, filter by band (1..10)
:param serialNum:str if provided find the record for a specific unit. Only has effect if band is provided.
:return list of dict{configId, band, serialNum, ESN, timeStamp, fLoYig, fHiYig, VG0, VG1}
or None if not found
'''
# this query joins FE_Components to itself in such a way that only the highest matching keyId is found
# it also joins WCAs to get the YIG oscillator limits
q = '''SELECT FEC0.keyId, FEC0.Band, FEC0.SN, FEC0.ESN1, FEC0.TS, WCAs.FloYIG, WCAs.FhiYIG, WCAs.VG0, WCAs.VG1
FROM FE_Components AS FEC0 LEFT JOIN FE_Components AS FEC1
ON FEC0.Band = FEC1.Band AND FEC0.SN = FEC1.SN AND FEC1.keyId > FEC0.keyId
JOIN WCAs ON fkFE_Component = FEC0.keyId
WHERE FEC1.keyId IS NULL
AND FEC0.fkFE_ComponentType = 11'''
if band and 1 <= band <= 10:
# filter by band:
q += " AND FEC0.Band = {0}".format(band)
if serialNum:
# filter for a specific SN of the provided band
q += " AND (FEC0.SN = '{:s}' OR FEC0.SN = '{:02d}')".format(str(serialNum), int(serialNum))
q += " ORDER BY FEC0.keyId;"
self.DB.execute(q)
rows = self.DB.fetchall()
if not rows:
return None
else:
# return list of dict:
return [{'configId' : row[0],
'band' : row[1],
'serialNum' : row[2],
'ESN' : row[3],
'timeStamp' : makeTimeStamp(row[4]),
'fLoYig' : row[5],
'fHiYig' : row[6],
'VG0' : row[7],
'VG1' : row[8]
} for row in rows]
def getWCAConfigSpecific(self, configId):
'''
Load a specific WCA configuration record
:param configId:int component keyId
:return dict{configId, band, serialNum, ESN, timeStamp, fLoYig, fHiYig}
or None if not found
'''
q = '''SELECT FEC.keyId, FEC.Band, FEC.SN, FEC.ESN1, FEC.TS, WCAs.FloYIG, WCAs.FhiYIG, WCAs.VG0, WCAs.VG1
FROM FE_Components AS FEC
JOIN WCAs ON fkFE_Component = FEC.keyId
WHERE FEC.keyId = {0}
AND FEC0.fkFE_ComponentType = 11'''.format(configId)
self.DB.execute(q)
row = self.DB.fetchone()
if not row:
return None
else:
# return dict:
return {'configId' : row[0],
'band' : row[1],
'serialNum' : row[2],
'ESN' : row[3],
'timeStamp' : makeTimeStamp(row[4]),
'fLoYig' : row[5],
'fHiYig' : row[6],
'VG0' : row[7],
'VG1' : row[8]}
def getWCAMaster(self, band = None, serialNum = None):
'''
Load the ESN for one or more band/serialNum pairs from the WCA SN to ESN master table
'WCAMaster_2020_11_10' provided by Jim Muehlberg on 10-November-2020
:param band:int if provided, filter by band (1..10)
:param serialNum:str if provided find the record for a specific unit. Only has effect if band is provided.
:return list of dict{band, serialNum, ESN, timeStamp, fLoYig, fHiYig, VG0, VG1} ordered by band, serialNum
'''
q = 'SELECT Band, UnitSerial, ESN, LoTune, HiTune, VG0, VG1 from WCAMaster_2020_11_10'
if band and 1 <= band <= 10:
# filter by band:
q += " WHERE Band = {0}".format(band)
if serialNum:
# filter for a specific SN of the provided band
q += " AND UnitSerial = '{0}'".format(serialNum)
q += " ORDER BY Band, UnitSerial;"
self.DB.execute(q)
rows = self.DB.fetchall()
if not rows:
return None
else:
# return list of dict:
return [{'band' : row[0],
'serialNum' : row[1],
'ESN' : row[2],
'timeStamp' : makeTimeStamp("2020-11-10"),
'fLoYig' : row[3],
'fHiYig' : row[4],
'VG0' : row[5],
'VG1' : row[6]
} for row in rows]
def getLOParams(self, configIds):
'''
Load the LOParams records for one or more configIds
:param configIds:int or list(int)
:return list of dict{configId, freqLO, VD0, VD1}
or None if not found
'''
q = "SELECT fkComponent, FreqLO, VDP0, VDP1 FROM WCA_LOParams"
q += " WHERE fkComponent in ("
first = True
try:
# try iteration:
for configId in configIds:
if first:
first = False
else:
q += ","
q += str(configId)
except:
# iteration failed, treat it as a single int:
q += str(configIds)
q += ") ORDER BY fkComponent, FreqLO;"
self.DB.execute(q)
rows = self.DB.fetchall()
if not rows:
return None
else:
# return list of dict:
return [{'configId' : row[0],
'freqLO' : row[1],
'VD0' : row[2],
'VD1' : row[3]
} for row in rows]
def updateESN(self, configId, ESN):
q = "UPDATE FE_Components SET ESN1 = '{0}' WHERE keyId = {1}".format(ESN, configId)
self.DB.execute(q, commit = True)
def updateYIG(self, configId, fLoYig, fHiYig):
q = "UPDATE WCAs SET FloYIG = '{0}', FhiYIG = '{1}' WHERE fkFE_Component = {2}".format(fLoYig, fHiYig, configId)
self.DB.execute(q, commit = True)
def updateVGs(self, configId, VG0, VG1):
q = "UPDATE WCAs SET VG0 = '{0}', VG1 = '{1}' WHERE fkFE_Component = {2}".format(VG0, VG1, configId)
self.DB.execute(q, commit = True)
def verifyWCA(self, band = None, serialNum = None, updateESN = False, updateYIG = False, updateVGs = False, showProgress = False):
'''
Verify that the WCA data in the Components and WCAs tables matches the Master table.
:param band:int if provided, filter by band (1..10)
:param serialNum:str if provided find the record for a specific unit. Only has effect if band is provided.
:param updateESN: if True, repair the Components table ESN to match the Master table
:param updateYIG: if True, repair the WCAs table YIG endpoints to match the Master table
:param updateVGs: if True, repair the WCAs table VG0 and VG1 to match the Master table
:param showProgress: if True, print the WCA SN and status for each item processed
:return list of dict{band, SN, found:bool, matchESN:bool, matchYIG:bool, matchVGs: bool}
'''
configs = self.getWCAConfig(band, serialNum)
masters = self.getWCAMaster(band, serialNum)
# loop on masters because it is in band, serialNum order:
output = []
for item in masters:
found = False
matchESN = False
matchYIG = False
matchVGs = False
if showProgress:
print('WCA{}-{:02d} '.format(item['band'], int(item['serialNum'])), end='', flush=True)
config = None
if configs:
config = next((x for x in configs \
if x['band'] == item['band'] \
and x['serialNum'].isnumeric() and int(x['serialNum']) == item['serialNum']), None)
if not config:
if showProgress:
print('not found')
else:
found = True
if config['ESN'] == item['ESN']:
matchESN = True
else:
if showProgress:
print('ESN mismmatch ', end='', flush=True)
if updateESN:
self.updateESN(config['configId'], item['ESN'])
if showProgress:
print('fixed! ', end='', flush=True)
if config['fLoYig'] == item['fLoYig'] and config['fHiYig'] == item['fHiYig']:
matchYIG = True
else:
if showProgress:
print('YIG mismmatch ', end='', flush=True)
if updateYIG:
self.updateYIG(config['configId'], item['fLoYig'], item['fHiYig'])
if showProgress:
print('fixed! ', end='', flush=True)
if config['VG0'] == item['VG0'] and config['VG1'] == item['VG1']:
matchVGs = True
else:
if showProgress:
print('VGs mismmatch ', end='', flush=True)
if updateVGs:
self.updateVGs(config['configId'], item['VG0'], item['VG1'])
if showProgress:
print('fixed! ', end='', flush=True)
output.append({'band' : item['band'],
'serialNum' : item['serialNum'],
'ESN' : item['ESN'],
'found' : found,
'matchESN' : matchESN,
'matchYIG' : matchYIG,
'matchVGs' : matchVGs
})
if showProgress:
print('')
return output
def insertWCA(self, wcaRecord):
'''
Insert a record into the FE_Components table and the WCAs table from a dict
:param wcaRecord: dict{band, serialNum, ESN, timeStamp, fLoYig, fHiYig, VG0, VG1}
:return bool: indicating success
'''
TS = wcaRecord['timeStamp'].strftime(self.DB.TIMESTAMP_FORMAT)
q = '''INSERT INTO `FE_Components`
(`fkFE_ComponentType`, `SN`, `ESN1`, `Band`, `TS`) VALUES (11, '{0}', '{1}', {2}, '{3}');
'''.format(wcaRecord['serialNum'], wcaRecord['ESN'], wcaRecord['band'], TS)
self.DB.execute(q)
self.DB.execute("SELECT LAST_INSERT_ID();")
row = self.DB.fetchone()
if not row:
self.DB.rollback()
return False
else:
q = '''INSERT INTO `WCAs` (`TS`, `fkFE_Component`, `FloYIG`, `FhiYIG`, `VG0`, `VG1`)
VALUES ('{0}', {1}, {2}, {3}, {4}, {5});
'''.format(TS, row[0], wcaRecord['fLoYig'], wcaRecord['fHiYig'], wcaRecord['VG0'], wcaRecord['VG1'])
if self.DB.execute(q):
self.DB.commit()
return True
else:
self.DB.rollback()
return False
def writeXML(self, outputDir, band = None, serialNum = None, showProgress = False, makePlot = False, showPlot = False):
'''
create XML data delivery file(s) for one or more WCAs
optionally create corresponding max safe power plot(s)
:param outputDir:str path to a directory where the files will be created/replaced
:param band:int if provided, filter by band (1..10)
:param serialNum:str if provided find the record for a specific unit. Only has effect if band is provided.
:param showProgress: if True write a dot to the console for each file written
:param makePlot: if True, generate a plot of the max safe power table
:param showPlot: if True and makePlot, show the plot interactively
:raise ValueError: if outputDir doesn't exist
'''
if not os.path.exists(outputDir):
raise ValueError('outputDir does not exist')
# get the matching WCAConfig records:
configs = self.getWCAConfig(band, serialNum)
if configs:
# loop to create individual WCA files:
for config in configs:
self.makeSerialString(config)
# only create files for WCAs with numeric serial numbers; exclude the test sources, etc:
doCreate = config['serialNumIsNumeric']
# don't create files for band 5 pre-production WCAs:
if doCreate and (config['band'] == 5 and int(config['serialNum']) < 10):
doCreate = False
if doCreate:
self.writeConfigXML(outputDir, config, showProgress, makePlot, showPlot)
self.writeOutputPowerXML(outputDir, config, showProgress)
if showProgress:
print(' done!')
def makeSerialString(self, config):
'''
Make a human-readable serial number string, like 'WCA3-21'
:param config: dict{configId, band, serialNum, ESN, timeStamp, fLoYig, fHiYig, VG0, VG1}
:return adds keys 'serialString' and 'serialNumIsNumeric' to config
'''
try:
# for numeric SN strings:
config['serialString'] = 'WCA{}-{:02d}'.format(config['band'], int(config['serialNum']))
config['serialNumIsNumeric'] = True
except:
# for SN strings containing non-numeric chars:
config['serialString'] = 'WCA{}-{}'.format(config['band'], config['serialNum'])
config['serialNumIsNumeric'] = False
def writeConfigXML(self, outputDir, config, showProgress = False, makePlot = False, showPlot = False):
'''
create XML config delivery file for a single WCA
optionally create corresponding max safe power plot
:param outputDir:str path to a directory where the file(s) will be created/replaced
:param config: dict{configId, band, serialNum, serialString, ESN, timeStamp, fLoYig, fHiYig, VG0, VG1}
:param showProgress: if True write a dot to the console for each file written
:param makePlot: if True, generate a plot of the max safe power table
:param showPlot: if True and makePlot, show the plot interactively
:raise ValueError: if outputDir doesn't exist
'''
if not os.path.exists(outputDir):
raise ValueError('outputDir does not exist')
# build the XML ElementTree:
top = ET.Element('ConfigData')
tree = ET.ElementTree(top)
ET.SubElement(top, 'ASSEMBLY', attrib = {'value' : 'WCA{0}'.format(config['band'])})
ET.SubElement(top, 'WCAConfig', attrib = {'value' : '{0}'.format(config['configId']),
'timestamp' : '{0}'.format(config['timeStamp'].isoformat())})
ET.SubElement(top, 'ESN', attrib = {'value' : '{0}'.format(config['ESN'])})
ET.SubElement(top, 'SN', attrib = {'value' : config['serialString']})
ET.SubElement(top, 'FLOYIG', attrib = {'value' : '{:.3f}E9'.format(config['fLoYig'])})
ET.SubElement(top, 'FHIYIG', attrib = {'value' : '{:.3f}E9'.format(config['fHiYig'])})
# not using LOParams here for data delivery. VDs=0.0 with the correct VGs:
ET.SubElement(top, 'PowerAmp', attrib = {'FreqLO' : '{:.3f}E9'.format(self.BAND_LO_LOWEST[config['band']]),
'VD0' : '0.0',
'VD1' : '0.0',
'VG0' : '{0}'.format(config['VG0']),
'VG1' : '{0}'.format(config['VG1'])})
# compute max safe LO power:
maxSafeTable = self.getMaxSafePowerTable(config) if 4 <= config['band'] <= 10 else None
if maxSafeTable:
# write the PowerAmpLimit elements:
for row in maxSafeTable:
ET.SubElement(top, 'PowerAmpLimit ', attrib = {'count' : '{0}'.format(int(row['yigTuning'])),
'VD0' : '{0}'.format(row['VD0']),
'VD1' : '{0}'.format(row['VD1'])})
# write the XML file. Filename is decimal representation of ESN:
if config['ESN']:
fileName = '{0}.XML'.format(int(config['ESN'], 16))
else:
fileName = '{0}.XML'.format(config['serialString'])
file = os.path.join(outputDir, fileName)
tree.write(file, encoding = "ISO-8859-1", xml_declaration = True)
# plot max safe power:
if makePlot:
if not self.plotWCA:
self.plotWCA = PlotWCA()
limit = self.BAND_MAX_SAFE_POWER_MW[config['band']]
plotName = '{0}_PowerAmpLimit.PNG'.format(config['serialString'])
plotName = os.path.join(outputDir, plotName)
title = "{} max safe power (limit = {} mW)\nfile: '{}'" \
.format(config['serialString'], limit, fileName)
self.plotWCA.plotMaxSafeTable(plotName, maxSafeTable, limit, title, show = showPlot)
if showProgress:
print('.', end='', flush=True)
def writeOutputPowerXML(self, outputDir, config, showProgress = False):
'''
create XML output power vs LO delivery file for a single WCA
:param outputDir:str path to a directory where the file will be created/replaced
:param config: dict{configId, band, serialNum, serialString, ESN, timeStamp, fLoYig, fHiYig, VG0, VG1}
:param showProgress: if True write a dot to the console for each file written
:raise ValueError: if outputDir doesn't exist
:return True if success, False if no data found or error
'''
if not os.path.exists(outputDir):
raise ValueError('outputDir does not exist')
# load all output power vs LO test data:
allRows = self.outputPower.getOutputPowerVsLO(config['configId'])
if not allRows:
return False
# build the XML ElementTree:
top = ET.Element('OutputPowerVsLO')
tree = ET.ElementTree(top)
ET.SubElement(top, 'ASSEMBLY', attrib = {'value' : 'WCA{0}'.format(config['band'])})
ET.SubElement(top, 'WCAConfig', attrib = {'value' : '{0}'.format(config['configId']),
'timestamp' : '{0}'.format(config['timeStamp'].isoformat())})
ET.SubElement(top, 'SN', attrib = {'value' : config['serialString']})
for row in allRows:
# [pol, freqLO, VD0, VD1, power]
ET.SubElement(top, 'OutputPower', attrib = {'pol' : '{0}'.format(row[0]),
'FreqLO' : '{:.3f}E9'.format(row[1]),
'VD0' : '{0}'.format(round(row[2], 4)),
'VD1' : '{0}'.format(round(row[3], 4)),
'powerMW' : '{0}'.format(round(row[4], 1))})
# write out the file:
fileName = '{0}_OutputPower.XML'.format(config['serialString'])
file = os.path.join(outputDir, fileName)
tree.write(file, encoding = "ISO-8859-1", xml_declaration = True)
if showProgress:
print('.', end='', flush=True)
return True
def getMaxSafePowerTable(self, config):
'''
Calculate and return a table of maximum safe LO power settings for the specified config.
:param config: single WCAConfig object
or: single int configId for a specific WCA.
:return list of dict{freqLO, yigTuning, VD0, VD1, power0, power1}
sorted by freqLO
'''
if isinstance(config, int):
config = self.getWCAConfigSpecific(config)
# if yigSpan is zero, something is wrong with this config. Get out now:
yigSpan = config['fHiYig'] - config['fLoYig']
if yigSpan == 0:
return None
# get the band-specific power limit. If 0 skip this:
limit = self.BAND_MAX_SAFE_POWER_MW[config['band']]
if not limit:
return None
# load all output power vs VD test data:
allRows = self.outputPower.getOutputPowerVsVD(config['configId'])
# quit now if there's no data:
if not allRows:
return None
allRows = pd.DataFrame(allRows, columns = ['pol', 'freqLO', 'VD0', 'VD1', 'power'])
# quit now if empty DataFrame:
if allRows.empty:
return None
try:
# find max output power for each pol:
f = allRows.groupby(['pol']).max()
maxVD0 = f.loc[0, 'VD0']
maxVD1 = f.loc[1, 'VD1']
except:
return None
# compute scaling factors to convert drain voltages into control values:
if maxVD0 == 0.0 or maxVD1 == 0.0:
return None
scaleVD0 = 2.5 / maxVD0
scaleVD1 = 2.5 / maxVD1
# reduce it to rows holding the max allowed output power:
allRows = self.findMaxSafeRows(allRows, limit);
# will divide by the WCA warm multiplication factor:
warmMult = self.BAND_WARM_MULT[config['band']]
# loop to scale the YIG tuning and drain voltages:
f = allRows
for i in range(0, f.shape[0]):
f.loc[f.index[i], 'VD0'] = round(f.loc[f.index[i], 'VD0'] * scaleVD0, 4)
f.loc[f.index[i], 'VD1'] = round(f.loc[f.index[i], 'VD1'] * scaleVD1, 4)
f.loc[f.index[i], 'power0'] = round(f.loc[f.index[i], 'power0'], 1)
f.loc[f.index[i], 'power1'] = round(f.loc[f.index[i], 'power1'], 1)
f.loc[f.index[i], 'yigTuning'] = round((((f.index[i] / warmMult) - config['fLoYig']) / yigSpan) * 4095)
# reset the index to include freqLO, return as a list of dict:
return allRows.reset_index().to_dict('records')
def findMaxSafeRows(self, allRows, powerLimit):
'''
transform result from loadOutputPower() into a table indexed by freqLO
having the max safe VD0, VD1 to stay under the powerLimit
and retaining only the endpoints of duplicate sequences on (VD0 AND VD1)
:param allRows:pandas.DataFrame[pol, freqLO, VD0, VD1, power]
sorted by Pol, freqLO, VD0, VD1
:param powerLimit:float max safe power allowed mW
:return pandas.DataFrame[freqLO, VD0, power0, VD1, power1]
sorted by freqLO
'''
# for each pol, grouped by freqLO, get the indices of the rows having max power but under the power limit:
ix0 = allRows[(allRows['power'] <= powerLimit) & (allRows['pol'] == 0)].groupby(['freqLO'])['VD0'].idxmax()
ix1 = allRows[(allRows['power'] <= powerLimit) & (allRows['pol'] == 1)].groupby(['freqLO'])['VD1'].idxmax()
# make new DataFrames for pol0 and pol1 from the found indices:
msr0 = pd.DataFrame([allRows.loc[x, ['freqLO', 'VD0', 'power']] for x in ix0])
msr1 = pd.DataFrame([allRows.loc[x, ['freqLO', 'VD1', 'power']] for x in ix1])
# rename the power columns:
msr0.rename(columns={"power": "power0"}, inplace = True)
msr1.rename(columns={"power": "power1"}, inplace = True)
# set the index to freqLO:
msr0.set_index('freqLO', inplace = True)
msr1.set_index('freqLO', inplace = True)
# join the two tables, 'outer' in case a freqLO record is missing from one or the other:
msr = msr0.join(msr1, how='outer')
# mark all but the endpoints of duplicate ranges over (VD0 AND VD1) with 'dup' = True
for i in range(1, msr.shape[0] - 2 + 1):
# matches prev record?
dupL = msr.iloc[i-1]['VD0'] == msr.iloc[i]['VD0'] and msr.iloc[i-1]['VD1'] == msr.iloc[i]['VD1']
# matches next record?
dupH = msr.iloc[i]['VD0'] == msr.iloc[i+1]['VD0'] and msr.iloc[i]['VD1'] == msr.iloc[i+1]['VD1']
# assign AND of those to new column 'dup':
msr.loc[msr.index[i], 'dup'] = True if dupL and dupH else False
# return the non 'dup' rows, exluding the 'dup' column:
return msr[msr['dup'] != True].loc[:, :'power1'] | ALMAFE-ConfigDelivery | /ALMAFE-ConfigDelivery-0.0.2.tar.gz/ALMAFE-ConfigDelivery-0.0.2/ConfigDelivery/WCA/WCADatabase.py | WCADatabase.py |
from ALMAFE.database.DriverMySQL import DriverMySQL as driver
from ConfigDelivery.WCA.LoadConfiguration import loadConfiguration
from FETMSDatabase.TestDataHeader import TestDataHeader
class WCAOutputPower(object):
'''
classdocs
'''
KEYDATASET_VALUES = {
# definitions of values for keyDataSet field in WCA_OutputPower table
'OP_VS_LO' : 1, # output power vs LO at max VD
'OP_VS_VD_FINE_LO_STEPS' : 2, # output power vs VD for fine LO steps
'OP_VS_VD_LOW_MID_HI' : 3 # output power vs VD for low, middle, high LO
}
def __init__(self):
'''
Constructor
'''
connectionInfo = loadConfiguration()
self.DB = driver(connectionInfo)
self.TDH = TestDataHeader()
def getOutputPowerVsVD(self, configId):
'''
load all output power vs VD records for the specified WCA configId
:param configId:int to load
:return list of list[pol, freqLO, VD0, VD1, power]
sorted by pol, freqLO, VD0, VD1
'''
q = '''SELECT OP.Pol, OP.FreqLO, OP.VD0, OP.VD1, `Power`
FROM WCA_OutputPower AS OP JOIN TestData_header AS TDH
ON OP.fkHeader = TDH.keyId
WHERE fkFE_Components = {0} AND fkTestData_Type = {1}
AND (keyDataSet = {2} OR keyDataSet = {3})
ORDER BY Pol, FreqLO, VD0, VD1 ASC;
'''.format(configId, self.TDH.TEST_DATA_TYPES['WCA_OUTPUTPOWER'],
self.KEYDATASET_VALUES['OP_VS_VD_FINE_LO_STEPS'], self.KEYDATASET_VALUES['OP_VS_VD_LOW_MID_HI'])
if not self.DB.execute(q):
return None
rows = self.DB.fetchall()
if not rows:
return None
else:
allRows = [[row[0], row[1], row[2], row[3], row[4]] for row in rows]
return allRows
def getOutputPowerVsLO(self, configId):
'''
load all output power vs LO records for the specified WCA configId
:param configId:int to load
:return list of list[pol, freqLO, VD0, VD1, power]
sorted by pol, freqLO
'''
q = '''SELECT OP.Pol, OP.FreqLO, OP.VD0, OP.VD1, `Power`
FROM WCA_OutputPower AS OP JOIN TestData_header AS TDH
ON OP.fkHeader = TDH.keyId
WHERE fkFE_Components = {0} AND fkTestData_Type = {1} AND keyDataSet = {2}
ORDER BY Pol, FreqLO ASC;
'''.format(configId, self.TDH.TEST_DATA_TYPES['WCA_OUTPUTPOWER'], self.KEYDATASET_VALUES['OP_VS_LO'])
if not self.DB.execute(q):
return None
rows = self.DB.fetchall()
if not rows:
return None
else:
allRows = [[row[0], row[1], row[2], row[3], row[4]] for row in rows]
return allRows
def insertOutputPower(self, configId, rows, notes = None):
if not rows:
return False
keyId = self.TDH.insertHeader(self.TDH.TEST_DATA_TYPES['WCA_OUTPUTPOWER'],
configId,
self.TDH.TEST_DATA_STATUS['CARTRIDGE_PAI'],
rows[0]['band'],
timeStamp = rows[0]['timeStamp'],
notes = notes if notes else "loaded by ConfigDelivery.WCAOutputPower")
if not keyId:
return False
q = "INSERT INTO WCA_OutputPower(fkHeader, keyDataSet, TS, FreqLO, Power, Pol, VD0, VD1, VG0, VG1) VALUES "
firstTime = True
for row in rows:
if firstTime:
firstTime = False
else:
q += ", "
q += "({0}, {1}, '{2}', {3}, {4}, {5}, {6}, {7}, {8}, {9})".format( \
keyId, row['dataSet'], row['timeStamp'].strftime(self.DB.TIMESTAMP_FORMAT),
row['freqLO'], row['power'], row['pol'],
row['VD0'], row['VD1'], row['VG0'], row['VG1'])
return self.DB.execute(q, commit = True) | ALMAFE-ConfigDelivery | /ALMAFE-ConfigDelivery-0.0.2.tar.gz/ALMAFE-ConfigDelivery-0.0.2/ConfigDelivery/WCA/WCAOutputPower.py | WCAOutputPower.py |
from xml.etree import ElementTree
from enum import IntEnum
import os.path
class fwVersion(IntEnum):
"""Enumeration of supported firmware versions. Use the nearest, rounded down"""
V2_5 = 25
V2_6 = 26
V2_8 = 28
V2_9 = 29
V3_0 = 30
DEFAULT = V2_8
class FEMCMakeIniFile:
"""A class to format the INI files
CARTn.INI, CRYO.INI, FRONTEND.INI, LPR.INI, WCAn.INI
to be loaded onto the FEMC module flash disk.
These are required for FEMC firmware 2.8.7 and before.
They are optional for FEMC firmware 3.0 and after."""
def __init__(self):
self.reset()
self.fwVersion = fwVersion.DEFAULT
def reset(self):
"""Reset the variables we expect to load from a file or outside source."""
self.band = 0
self.kind = None
self.ESN = None
self.PA_LIMITS = []
def setFwVersion(self, ver):
if isinstance(ver, fwVersion):
self.fwVersion = ver
else:
print("setFwVersion: unsupported version '{}'.".format(ver))
VER_STRINGS = {
fwVersion.V2_5 : "2.5.x",
fwVersion.V2_6 : "2.6.x",
fwVersion.V2_8 : "2.8.x",
fwVersion.V2_9 : "2.9.x",
fwVersion.V3_0 : "3.0.x"
}
def getFwVersionString(self):
return self.VER_STRINGS.get(self.fwVersion, "unknown")
ASSEMBLY = {
# a dict mapping the ASSEMBLY tag to (kind, band):
'CCA1': ('CCA', 1),
'CCA2': ('CCA', 2),
'CCA3': ('CCA', 3),
'CCA4': ('CCA', 4),
'CCA5': ('CCA', 5),
'CCA6': ('CCA', 6),
'CCA7': ('CCA', 7),
'CCA8': ('CCA', 8),
'CCA9': ('CCA', 9),
'CCA10': ('CCA', 10),
'WCA1': ('WCA', 1),
'WCA2': ('WCA', 2),
'WCA3': ('WCA', 3),
'WCA4': ('WCA', 4),
'WCA5': ('WCA', 5),
'WCA6': ('WCA', 6),
'WCA7': ('WCA', 7),
'WCA8': ('WCA', 8),
'WCA9': ('WCA', 9),
'WCA10': ('WCA', 10),
'LPR': ('LPR', 0),
'CRYOSTAT': ('CRYO', 0),
'FRONTEND': ('FRONTEND', 0)
}
def setXmlFile(self, filename):
"""Read an XML file and save things that may be needed for the INI file output."""
# reset things we expect to parse:
self.reset()
# parse the provided file. TODO: try/except?
tree = ElementTree.parse(filename)
# confirm that it is an ALMA ConfigData structure:
root = tree.getroot()
if root.tag != 'ConfigData':
print("No ConfigData found.")
return
# what assembly is this?
assy = root.find('ASSEMBLY')
if assy is None:
print("No ASSEMBLY found.")
return
assyName = assy.get('value')
if assyName is None:
print("No ASSEMBLY.value found.")
return
# find assyName in the ASSEMBLY dict:
(self.kind, self.band) = self.ASSEMBLY.get(assyName, (None, 0))
if self.kind is None:
print("Unsupported ASSEMBLY:", assyName)
return
ESN = root.find('ESN')
if ESN is not None:
ESN = ESN.get('hex')
if ESN is None:
ESN = ''
self.ESN = ESN
if self.kind == 'WCA':
# load the PA_LIMITS entries from XML:
for entry in root.findall('PowerAmpLimit'):
count = entry.get('count')
VD0 = entry.get('VD0')
VD1 = entry.get('VD1')
self.PA_LIMITS.append({
'count': count,
'VD0': VD0,
'VD1': VD1
})
else:
print("setXmlFile:", self.kind, "is not implemented.")
return
def makeIniText(self):
"""Return a string formatted as the required for FEMC module INI file."""
result = ""
if self.kind == 'WCA':
# write header section:
result += (";\n"
"; WCA configuration file")
result += " for FEMC firmware version {}\n".format(
self.getFwVersionString())
result += ";\n"
# write INFO section:
if self.fwVersion <= fwVersion.V2_6:
result += ("\n[INFO]\n"
"; This section is required for firmware 2.6 and older.\n"
"ESN={}\n".format(self.ESN))
# write PLL section:
if self.fwVersion <= fwVersion.V2_6:
loopBW = '9'; # undefined/don't care
if self.band in (2, 3, 5, 6, 7, 10):
loopBW = '1' # 15 MHz/V
elif self.band in (4, 8, 9):
loopBW = '0' # 30 MHz/V
result += ("\n[PLL]\n"
"; This section is required for firmware 2.6 and older.\n"
"; PLL loop bandwidth select (0 = 7.5MHz/V, 1 = 15 MHz/V, 9 = undefined)\n")
result += "LOOP_BW={}\n".format(loopBW)
# write SCALING section:
if self.fwVersion <= fwVersion.V2_6:
result += ("\n[SCALING]\n"
"; These settings are the same for all bands.\n"
"; Nonetheless, this section is required for firmware 2.6 and older.\n"
"PLL_LOCK=19.09090909\n"
"PLL_CORR=19.09090909\n"
"SUPPLY_V=20.0\n"
"MULT_C=100.0\n"
"PLL_YIG_C_SCALE=400.0\n"
"PLL_YIG_C_OFFSET=150.0\n")
# write PA_LIMITS section:
if self.fwVersion >= fwVersion.V2_6:
result += ("\n[PA_LIMITS]\n"
"; Output power limits section is supported but not required for firmware 2.6 and newer.\n"
"; The firmware will interpolate between table rows. Format is:\n"
";\n"
"; ENTRY_n=count, VD0 limit, VD1 limit\n"
";\n"
"; Where count is a raw YTO tuning word and the limits are VD *set* values.\n")
result += "ESN={}\n".format(self.ESN)
result += "ENTRIES={}\n".format(len(self.PA_LIMITS))
num = 0;
for entry in self.PA_LIMITS:
num += 1
result += "ENTRY_{num}={count}, {VD0}, {VD1}\n".format(
num = num,
count = entry['count'],
VD0 = entry['VD0'],
VD1 = entry['VD1'])
else:
print("makeIniText:", self.kind, "is not implemented.")
return result
def writeIniFile(self, targetDir = "."):
"""Write out an INI file suitable for use on the FEMC module to the given targetDir."""
if not os.path.isdir(targetDir):
print("writeIniFile: {} is not a directory.".format(targetDir))
if self.kind == 'WCA':
filename = os.path.join(targetDir, "WCA{}.INI".format(self.band))
elif self.kind == 'CCA':
filename = os.path.join(targetDir, "CCA{}.INI".format(self.band))
elif self.kind == 'CRYO':
filename = os.path.join(targetDir, "CRYO.INI")
elif self.kind == 'LPR':
filename = os.path.join(targetDir, "LPR.INI")
else:
print("writeIniFile: unsupported component: '{}'.".format(self.kind))
return
fp = open(filename, 'w')
fp.write(self.makeIniText())
fp.close()
def test_FEMCMakeIniFile():
fe = FEMCMakeIniFile()
#fe.setFwVersion(fwVersion.V2_6)
fe.setXmlFile("./test/92884882929221748.xml")
fe.writeIniFile("./test") | ALMAFE-ConfigDelivery | /ALMAFE-ConfigDelivery-0.0.2.tar.gz/ALMAFE-ConfigDelivery-0.0.2/ConfigDelivery/FEMC/FEMCMakeIniFile.py | FEMCMakeIniFile.py |
import os.path
import configparser
import copy
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import ConfigFiles
class DeliveryConfig():
'''
Class to provide access to the CCA6 Data Delivery control files in ConfigFiles
'''
def __init__(self):
self.reset()
self.readDeliverablesConfig()
def reset(self):
self.__deliverables = []
def getDeliverables(self):
return copy.copy(self.__deliverables)
def readDeliverablesConfig(self):
config = configparser.ConfigParser()
text = pkg_resources.read_text(ConfigFiles, 'Deliverables Configuration.ini')
config.read_string(text)
self.deliverables = []
for section in config.sections():
enable = bool(config.get(section, 'Enable', fallback = True))
dataType = config.get(section, 'Type', fallback = None)
dataLocation = config.get(section, 'DataLocation', fallback = '')
configFileName = config.get(section, 'ConfigFileName', fallback = None)
outputFileName = config.get(section, 'OutputFileName', fallback = None)
outputFields = config.get(section, 'Output', fallback = '').split(',')
# add valid sections to the result:
if dataType and configFileName and outputFileName and len(outputFields):
self.__deliverables.append({
'name' : section,
'enable' : enable,
'dataType' : dataType,
'dataLocation' : dataLocation,
'configFileName' : configFileName,
'outputFileName' : outputFileName,
'outputFields' : outputFields
})
def readDataFormatFile(self, deliverable):
# check for supported dataType:
dataType = deliverable['dataType']
if not (dataType == 'Excel' or dataType == 'Pattern'):
print("Unsupported deliverable type '{}'".format(dataType))
return False
# drop keyBand, keyDataSet, fkCartAssy:
outputFields = deliverable['outputFields'][3:]
# parse the referenced configFile:
config = configparser.ConfigParser()
config.read_string(pkg_resources.read_text(ConfigFiles, deliverable['configFileName']))
deliveryItems = []
for section in config.sections():
sheetName = config.get(section, 'Sheetname', fallback = '')
macro = config.get(section, 'Macro', fallback = '')
if dataType == 'Excel':
valuesLocations = []
for key in outputFields:
value = config.get(section, key, fallback = '').strip()
if not value:
print("Section '{}' is missing key '{}'".format(section.name, key))
else:
if value[0] == '!':
valuesLocations.append({
'key' : key,
'location' : value[1:],
'value' : ''
})
else:
valuesLocations.append({
'key' : key,
'location' : '',
'value' : value
})
deliveryItems.append({
'sheetName' : sheetName,
'macro' : macro,
'valuesLocations' : valuesLocations
})
else:
# dataType == 'Pattern'
value = config.get(section, 'PatternKeys', fallback = '').strip()
if not value:
print("Section '{}' is missing key 'PatternKeys'".format(section.name))
else:
if value[0] == '!':
deliveryItems.append({
'sheetName' : sheetName,
'macro' : macro,
'valuesLocations' : [{'key' : 'PatternKeys', 'location' : value[1:], 'value' : ''}]
})
else:
deliveryItems.append({
'sheetName' : sheetName,
'macro' : macro,
'valuesLocations' : [{'key' : 'PatternKeys', 'location' : '', 'value' : value}]
})
return deliveryItems | ALMAFE-ConfigDelivery | /ALMAFE-ConfigDelivery-0.0.2.tar.gz/ALMAFE-ConfigDelivery-0.0.2/ConfigDelivery/CCA6/DeliveryConfig.py | DeliveryConfig.py |
import plotly.graph_objects as go
from plotly.subplots import make_subplots
class PlotWCA(object):
'''
Plots related to WCA Config Delivery
'''
def __init__(self):
'''
Constructor
'''
pass
def plotMaxSafeTable(self, outputName, maxSafeTable, limit, title, show = False):
'''
Plot the contents of the WCA MaxSafePowerTable
:param outputName: PNG file to write
:param maxSafeTable: list of dict{freqLO, yigTuning, VD0, VD1, power0, power1}
sorted by freqLO
:param limit: maximum output power spec limit in mW
:param title: plot title to display
:param show: if True, display the plot interactively
'''
if not maxSafeTable:
return
fig = make_subplots(specs=[[{"secondary_y": True}]])
x = [row['freqLO'] for row in maxSafeTable]
VD0 = [row['VD0'] for row in maxSafeTable]
VD1 = [row['VD1'] for row in maxSafeTable]
power0 = [row['power0'] for row in maxSafeTable]
power1 = [row['power1'] for row in maxSafeTable]
limitX = [x[0], x[-1]]
limitY = [limit, limit]
line0 = dict(color='firebrick', width=4)
line1 = dict(color='royalblue', width=4)
line2 = dict(color='black', width=1)
fig.add_trace(go.Scatter(x = x, y = power0, line = line0, name = 'power0'), secondary_y=False)
fig.add_trace(go.Scatter(x = x, y = power1, line = line1, name = 'power1'), secondary_y=False)
fig.add_trace(go.Scatter(x = limitX, y = limitY, line = line2, name = 'limit', mode='lines'), secondary_y=False)
line0['dash'] = 'dash'
line1['dash'] = 'dash'
fig.add_trace(go.Scatter(x = x, y = VD0, line = line0, name = 'VD0 set'), secondary_y=True)
fig.add_trace(go.Scatter(x = x, y = VD1, line = line1, name = 'VD1 set'), secondary_y=True)
fig.update_xaxes(title_text = 'freqLO [GHz]')
fig.update_yaxes(title_text = 'power0, power1 [mW]', secondary_y=False)
fig.update_yaxes(title_text = 'VD0, VD1', secondary_y=True)
# yrange is limit rounded up to the next multiple of 50:
yrange = limit + (50 - limit % 50) if (limit % 50) else limit
fig.update_yaxes(range=[0, yrange], secondary_y=False)
fig.update_yaxes(range=[0, 3], secondary_y=True)
fig.update_layout(title_text = title)
imageData = fig.to_image(format = "png", width = 800, height = 500)
# save to file, if requested:
if outputName:
with open(outputName, 'wb') as file:
file.write(imageData)
# show interactive:
if show:
fig.show() | ALMAFE-ConfigDelivery | /ALMAFE-ConfigDelivery-0.0.2.tar.gz/ALMAFE-ConfigDelivery-0.0.2/ConfigDelivery/Plot/WCA.py | WCA.py |
from ALMAFE.database.DriverMySQL import DriverMySQL as driver
from ALMAFE.basic.ParseTimeStamp import makeTimeStamp
from FETMSDatabase.LoadConfiguration import loadConfiguration
class TestDataHeader(object):
'''
classdocs
'''
TEST_DATA_TYPES = {
'WCA_OUTPUTPOWER' : 46
}
TEST_DATA_STATUS = {
'UNKNOWN' : 0,
'COLD_PAS' : 1,
'WARM_PAS' : 2,
'COLD_PAI' : 3,
'HEALTH_CHECK' : 4,
'CARTRIDGE_PAI' : 7
}
def __init__(self):
'''
Constructor
'''
connectionInfo = loadConfiguration()
self.DB = driver(connectionInfo)
def insertHeader(self, testDataType, fkFEComponent, dataStatus, band, dataSetGroup = 0, timeStamp = None, notes = None):
q = "INSERT INTO TestData_header(fkTestData_Type, DataSetGroup, fkFE_Components, fkDataStatus, Band, TS";
if notes:
q += ", Notes"
timeStamp = makeTimeStamp(timeStamp).strftime(self.DB.TIMESTAMP_FORMAT)
q += ") VALUES ({0}, {1}, {2}, {3}, {4}, '{5}'".format(testDataType, dataSetGroup, fkFEComponent, dataStatus, band, timeStamp)
if notes:
q += ", '{0}'".format(notes)
q += ");"
if not self.DB.execute(q):
return False
self.DB.execute("SELECT LAST_INSERT_ID();")
row = self.DB.fetchone()
if not row:
self.DB.rollback()
return False
else:
self.DB.commit()
return row[0]
def getHeader(self, testDataType, configId):
q = '''SELECT keyId, fkTestData_Type, DataSetGroup, fkFE_Components, Band, TS, Notes FROM TestData_header
WHERE fkTestData_Type = {0} AND fkFE_Components = {1} ORDER BY keyId DESC;'''.format(testDataType, configId)
self.DB.execute(q)
rows = self.DB.fetchall()
if not rows:
return None
else:
# return list of dict:
return [{'keyId' : row[0],
'type' : row[1],
'group' : row[2],
'configId' : row[3],
'band' : row[4],
'timeStamp' : makeTimeStamp(row[5]),
'notes' : row[6]
} for row in rows]
def getHeaderSpecific(self, keyId):
q = '''SELECT keyId, fkTestData_Type, DataSetGroup, fkFE_Components, Band, TS, Notes FROM TestData_header
WHERE keyId = {0}
ORDER BY keyId DESC;'''.format(keyId)
self.DB.execute(q)
row = self.DB.fetchone()
if not row:
return None
else:
return {'keyId' : row[0],
'type' : row[1],
'group' : row[2],
'configId' : row[3],
'band' : row[4],
'timeStamp' : makeTimeStamp(row[5]),
'notes' : row[6]
}
def deleteHeader(self, testDataType, configId):
q = '''DELETE FROM TestData_header
WHERE fkTestData_Type = {0} AND fkFE_Components = {1};'''.format(testDataType, configId)
self.DB.execute(q, commit = True)
def deleteHeaderSpecific(self, keyId):
q = "DELETE FROM TestData_header WHERE keyId = {0};".format(keyId)
self.DB.execute(q, commit = True) | ALMAFE-ConfigDelivery | /ALMAFE-ConfigDelivery-0.0.2.tar.gz/ALMAFE-ConfigDelivery-0.0.2/FETMSDatabase/TestDataHeader.py | TestDataHeader.py |
# ALMAFE-Lib package
Contains reusable tools which are required by other ALMAFE packages.
## ALMAFE.basic.ParseTimeStamp module
### class ParseTimeStamp:
Helper object for parsing time stamps in a variety of formats.
Caches last matching time stamp format string to speed subsequent calls.
### function makeTimeStamp(timeStamp = None):
initialized a timestamp from provided, or now() if None provided
:param timeStamp: string or datetime or None
## ALMAFE.basic.StripQuotes:
Utility to strip quotes from a string, if present.
## ALMAFE.common.GitVersion:
### function gitVersion():
Return the current Git tag (if any) and revision as a string
### function gitBranch():
Return the current Git branch name as a string
## ALMAFE.database.DriverMySQL:
### class DriverMySQL():
Driver wrapper for mysql-connector-python
Provides a uniform interface to SQL user code
### class DriverSQLite():
Driver wrapper for sqlite3
Provides a uniform interface to SQL user code
| ALMAFE-Lib | /ALMAFE-Lib-0.0.13.tar.gz/ALMAFE-Lib-0.0.13/README.md | README.md |
import cx_Oracle
class DriverOracle():
'''
Driver wrapper for cx_Oracle
Provides a uniform interface to SQL user code
'''
TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
def __init__(self, connectionInfo):
'''
Constructor
:param connectionInfo: dictionary having the items needed to connect to Oracle server
{'host', 'user', 'passwd', 'service_name', 'schema', 'port' : 1521, 'encoding' : 'UTF-8'}
'''
self.host = connectionInfo['host']
self.user = connectionInfo['user']
self.passwd = connectionInfo['passwd']
self.service_name = connectionInfo['service_name']
self.port = connectionInfo.get('port', 1521)
self.schema = connectionInfo.get('schema', None)
encoding = connectionInfo.get('encoding', 'UTF-8')
self.connection = None
self.cursor = None
self.connect(encoding = encoding)
def connect(self, encoding = 'UTF-8'):
'''
Connect to the database.
:param encoding: str defaults to 'UTF-8'
:return True/False
'''
self.connection = None
try:
connString = "{}:{}/{}".format(self.host, self.port, self.service_name)
if self.connection:
self.connection.close()
self.connection = cx_Oracle.connect(self.user, self.passwd, connString, encoding=encoding)
self.cursor = self.connection.cursor()
if self.schema:
self.cursor.execute('ALTER SESSION SET CURRENT_SCHEMA = "{}"'.format(self.schema))
return True
except cx_Oracle.Error as e:
print(f"cx_Oracle error: {e}")
return False
def disconnect(self):
'''
Disconnect from the database.
:return True/False
'''
try:
self.connection.close()
self.connection = None
self.cursor = None
return True
except cx_Oracle.Error as e:
print(f"cx_Oracle error: {e}")
return False
def execute(self, query, params = None, commit = False):
'''
Execute an SQL query.
:param query: str
:param params: list (by position) or dict (by tag name) of values to assign to bind variables in the query.
Bind variables are names prefixed by a colon. E.g. :my_var
:param commit: If True, commit INSERT/UPDATE/DELETE queries immediately.
:return True/False
'''
try:
self.cursor.execute(query)
if commit:
self.connection.commit()
return True
except cx_Oracle.Error as e:
print(f"cx_Oracle error: {e}")
print(query)
return False
def commit(self):
'''
Commit any previously executed but not yet committed INSERT/UPDATE/DELETE queries.
:return True/False
'''
try:
self.connection.commit()
return True
except cx_Oracle.Error as e:
print(f"cx_Oracle error: {e}")
return False
def rollback(self):
'''
Rollback any previously executed but not yet committed INSERT/UPDATE/DELETE queries.
:return True/False
'''
try:
self.connection.rollback()
return True
except cx_Oracle.Error as e:
print(f"cx_Oracle error: {e}")
return False
def fetchone(self):
'''
Fetch one row from the last SELECT query.
:return tuple or False
'''
try:
row = self.cursor.fetchone()
return row
except cx_Oracle.Error as e:
print(f"cx_Oracle error: {e}")
return False
def fetchmany(self, chunkSize):
'''
Fetch multiple rows from the last SELECT query.
:param chunkSize: max number of rows to fetch
:return list of tuple or False
'''
try:
result = self.cursor.fetchmany(chunkSize)
return result
except cx_Oracle.Error as e:
print(f"cx_Oracle error: {e}")
return False
def fetchall(self):
'''
Fetch all rows from the last SELECT query.
:return list of tuple or False
'''
try:
result = self.cursor.fetchall()
return result
except cx_Oracle.Error as e:
print(f"cx_Oracle error: {e}")
return False | ALMAFE-Lib | /ALMAFE-Lib-0.0.13.tar.gz/ALMAFE-Lib-0.0.13/ALMAFE/database/DriverOracle.py | DriverOracle.py |
import mysql.connector
from mysql.connector import Error
class DriverMySQL():
'''
Driver wrapper for mysql-connector-python
Provides a uniform interface to SQL user code
'''
TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
def __init__(self, connectionInfo):
'''
Constructor
:param connectionInfo: dictionary having the items needed to connect to MySQL server:
{'host', 'user', 'passwd', 'database', 'port' : 3306, 'use_pure' : False }
'''
self.host = connectionInfo['host']
self.user = connectionInfo['user']
self.passwd = connectionInfo['passwd']
self.database = connectionInfo['database']
self.port = connectionInfo.get('port', 3306)
self.use_pure = connectionInfo.get('use_pure', False)
self.cursor = None
self.connect()
def connect(self):
'''
Connect to the database.
use_pure=True will prevent BLOBs being returned as Unicode strings
(which either fails when decoding or when comparing to bytes.)
https://stackoverflow.com/questions/52759667/properly-getting-blobs-from-mysql-database-with-mysql-connector-in-python
:return True/False
'''
self.connection = None
try:
self.connection = mysql.connector.connect(host=self.host,
port=self.port,
user=self.user,
passwd=self.passwd,
database=self.database,
use_pure=self.use_pure)
self.cursor = self.connection.cursor()
return True
except Error as e:
print(f"MySQL error: {e}")
return False
def disconnect(self):
'''
Disconnect from the database.
:return True/False
'''
try:
self.connection.close()
self.connection = None
self.cursor = None
return True
except Error as e:
print(f"MySQL error: {e}")
return False
def execute(self, query, params = None, commit = False, reconnect = True):
'''
Execute an SQL query.
:param query: str
:param params: tuple or dictionary params are bound to the variables in the operation.
Specify variables using %s or %(name)s parameter style (that is, using format or pyformat style).
:param commit: If True, commit INSERT/UPDATE/DELETE queries immediately.
:param reconnect: If True and the connection seems to have gone away, reconnect and retry the query.
:return True/False
'''
doRetry = False
try:
self.cursor.execute(query, params)
if commit:
self.connection.commit()
except Error as e:
if not reconnect:
print(f"MySQL error: {e}")
return False
# this calls reconnect() internally:
self.connection.ping(reconnect = True, attempts = 2)
# get the cursor again:
self.cursor = self.connection.cursor()
doRetry = True
if doRetry:
# and retry the query
try:
self.cursor.execute(query, params)
if commit:
self.connection.commit()
except Error as e:
print(f"MySQL error: {e}")
return False
return True
def commit(self):
'''
Commit any previously executed but not yet committed INSERT/UPDATE/DELETE queries.
:return True/False
'''
try:
self.connection.commit()
return True
except Error as e:
print(f"MySQL error: {e}")
return False
def rollback(self):
'''
Rollback any previously executed but not yet committed INSERT/UPDATE/DELETE queries.
:return True/False
'''
try:
self.connection.rollback()
return True
except Error as e:
print(f"MySQL error: {e}")
return False
def fetchone(self):
'''
Fetch one row from the last SELECT query.
:return tuple or False
'''
try:
row = self.cursor.fetchone()
return row
except Error as e:
print(f"MySQL error: {e}")
return False
def fetchmany(self, chunkSize):
'''
Fetch multiple rows from the last SELECT query.
:param chunkSize: max number of rows to fetch
:return list of tuple or False
'''
try:
result = self.cursor.fetchmany(chunkSize)
return result
except Error as e:
print(f"MySQL error: {e}")
return False
def fetchall(self):
'''
Fetch all rows from the last SELECT query.
:return list of tuple or False
'''
try:
result = self.cursor.fetchall()
return result
except Error as e:
print(f"MySQL error: {e}")
return False | ALMAFE-Lib | /ALMAFE-Lib-0.0.13.tar.gz/ALMAFE-Lib-0.0.13/ALMAFE/database/DriverMySQL.py | DriverMySQL.py |
import sqlite3
class DriverSQLite():
'''
Driver wrapper for sqlite3
Provides a uniform interface to SQL user code
'''
TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
def __init__(self, connectionInfo):
'''
Constructor
:param connectionInfo: dictionary having the items needed to connect to SQLite server
{ 'LocalDatabaseFile' : <path str> }
'''
self.localDatabaseFile = connectionInfo['localDatabaseFile']
self.connect()
def connect(self):
'''
Connect to the database.
:return True/False
'''
self.connection = None
try:
self.connection = sqlite3.connect(self.localDatabaseFile)
cursor = self.connection.cursor()
cursor.execute("PRAGMA foreign_keys = ON;")
return True
except Exception as e:
print(f"SQLite error: {e}")
return False
def disconnect(self):
'''
Disconnect from the database.
:return True/False
'''
try:
self.connection.close()
self.connection = None
self.cursor = None
return True
except Exception as e:
print(f"SQLite error: {e}")
return False
def execute(self, query, params = None, commit = False):
'''
Execute an SQL query.
:param query: str
:param params: tuple or dictionary params are bound to the variables in the operation.
Specify variables using %s or %(name)s parameter style (that is, using format or pyformat style).
:param commit: If True, commit INSERT/UPDATE/DELETE queries immediately.
:return True/False
'''
self.cursor = self.connection.cursor()
try:
if params:
self.cursor.execute(query, params)
else:
self.cursor.execute(query)
if commit:
self.connection.commit()
return True
except Exception as e:
print(f"SQLite error: {e}")
return False
def executemany(self, query, params, commit = False):
'''
Executes a parameterized SQL command against all params. For bulk insert.
:param params: tuple or dictionary params are bound to the variables in the operation.
Specify variables using %s or %(name)s parameter style (that is, using format or pyformat style).
:param commit: If True, commit INSERT/UPDATE/DELETE queries immediately.
:return True/False
'''
self.cursor = self.connection.cursor()
try:
self.cursor.executemany(query, params)
if commit:
self.connection.commit()
return True
except Exception as e:
print(f"SQLite error: {e}")
return False
def commit(self):
'''
Commit any previously executed but not yet committed INSERT/UPDATE/DELETE queries.
:return True/False
'''
try:
self.connection.commit()
return True
except Exception as e:
print(f"SQLite error: {e}")
return False
def rollback(self):
'''
Rollback any previously executed but not yet committed INSERT/UPDATE/DELETE queries.
:return True/False
'''
try:
self.connection.rollback()
return True
except Exception as e:
print(f"SQLite error: {e}")
return False
def fetchone(self):
'''
Fetch one row from the last SELECT query.
:return tuple or False
'''
try:
row = self.cursor.fetchone()
return row
except Exception as e:
print(f"SQLite error: {e}")
return False
def fetchmany(self, chunkSize):
'''
Fetch multiple rows from the last SELECT query.
:param chunkSize: max number of rows to fetch
:return list of tuple or False
'''
try:
result = self.cursor.fetchmany(chunkSize)
return result
except Exception as e:
print(f"SQLite error: {e}")
return False
def fetchall(self):
'''
Fetch all rows from the last SELECT query.
:return list of tuple or False
'''
try:
result = self.cursor.fetchall()
return result
except Exception as e:
print(f"SQLite error: {e}")
return False | ALMAFE-Lib | /ALMAFE-Lib-0.0.13.tar.gz/ALMAFE-Lib-0.0.13/ALMAFE/database/DriverSQLite.py | DriverSQLite.py |
class Node():
def __init__(self, name, attrs = None, parent = None):
'''
Constructor
:param name: str key for searching
:param attrs: dict of all attributes other than name
:param parent: Node to make this a child of
'''
try:
self.name = str(name)
except (AttributeError, TypeError):
raise AssertionError('name parameter must be convertible to str')
# list of child nodes:
self.children = []
# dict of other attributes:
self.attrs = attrs if attrs else {}
if parent:
# make this a child of the specified parent:
try:
parent.children.append(self)
except (AttributeError, TypeError):
raise AssertionError('parent parameter must be a Node or None')
def find(self, name, includeRoot = True):
'''
Search for a node at or below this one with the specified name, using a breadth-first traversal.
:param name: str node name to find
:return the found node or None
'''
try:
name = str(name)
except (AttributeError, TypeError):
raise AssertionError('name parameter must be convertible to str')
for node in self.breadthFirst(includeRoot = includeRoot):
if name == node.name:
return node
return None
def findDF(self, name, includeRoot = True):
'''
search for a Node at or below this one with the specified name, using a depth-first pre-order traversal
:param name: str node name to find
:return the found node or None
'''
try:
name = str(name)
except (AttributeError, TypeError):
raise AssertionError('name parameter must be convertible to str')
for node in self.depthFirst(includeRoot = includeRoot):
if name == node.name:
return node
return None
def depthFirst(self, includeRoot = True, postOrder = False):
'''
Generator for recursive depth-first traversal of the tree
:param includeRoot: if False, suppress yeilding the node where the traversal started.
:param postOrder: if True, visit the root node last.
'''
# PreOrder - visit the root node first:
if includeRoot and not postOrder:
yield self
# Depth-first: recursively traverse children, always including root node:
for child in self.children:
for node in child.depthFirst(includeRoot = True, postOrder = postOrder):
yield node
# PostOrder - visit the root node last:
if includeRoot and postOrder:
yield self
def breadthFirst(self, includeRoot = True):
'''
Generator for recursive breadth-first traversal of the tree
Level-order: visit the root node first.
:param includeRoot: if False, suppress yeilding the node where the traversal started.
'''
# Level-order - visit the root node first:
if includeRoot:
yield self
# Breadth-first - visit each child node:
for child in self.children:
yield child
# Breadth-first - recursively traverse children, suppressing each child's root node:
for child in self.children:
for node in child.breadthFirst(includeRoot = False):
yield node | ALMAFE-Lib | /ALMAFE-Lib-0.0.13.tar.gz/ALMAFE-Lib-0.0.13/ALMAFE/datastruct/NWayTree.py | NWayTree.py |
from datetime import datetime
import dateutil.parser
import copy
import sys
class ParseTimeStamp(object):
'''
Helper object for parsing time stamps in a variety of formats.
Caches last matching time stamp format string to speed subsequent calls.
'''
def __init__(self):
'''
Constructor
'''
self.lastTimeStampFormat = None
def parseTimeStamp(self, timeStampString):
'''
        Parses the given timeStampString, trying to determine its format.
        Several formats are tried explicitly; if none of those match, dateutil's slower smart parser is used.
        Returns a datetime object, or False if the string cannot be parsed.
        Side-effect: if successful in determining the format, sets self.lastTimeStampFormat to the matching format string.
        :param timeStampString: a string representation of a timestamp, such as "2020-05-21 14:30:15.100"
'''
self.lastTimeStampFormat = None
# try SQL format:
timeStamp = self.parseTimeStampWithFormatString(timeStampString, '%Y-%m-%d %H:%M:%S')
if timeStamp:
return timeStamp
# try SQL format with milliseconds:
timeStamp = self.parseTimeStampWithFormatString(timeStampString, '%Y-%m-%d %H:%M:%S.%f')
if timeStamp:
            # the fractional part was parsed as microseconds; datetime.replace() returns a new
            # object, so assign the result for the millisecond conversion to take effect:
            timeStamp = timeStamp.replace(microsecond = timeStamp.microsecond // 1000)
return timeStamp
# try with seconds and AM/PM:
timeStamp = self.parseTimeStampWithFormatString(timeStampString, '%Y-%m-%d %I:%M:%S %p')
if timeStamp:
return timeStamp
# ask dateutil.parser to do its best:
try:
timeStamp = dateutil.parser.parse(timeStampString)
except ValueError as err:
return False
except:
print("Unexpected error:", sys.exc_info()[0])
raise
else:
return timeStamp
def parseTimeStampWithFormatString(self, timeStampString, timeStampFormat):
'''
Private, though is called directly by test cases.
        Tries to parse timeStampString using the given timeStampFormat.
        Returns a datetime object if successful, False otherwise.
        Side-effect: If successful, sets self.lastTimeStampFormat to timeStampFormat.
:param timeStampString: string to parse
:type timeStampString: str
:param timeStampFormat: format to try, using the format codes from datetime.strptime()
:type timeStampFormat: str
'''
self.lastTimeStampFormat = None
try:
timeStamp = datetime.strptime(timeStampString, timeStampFormat)
except ValueError as err:
return False
except:
print("Unexpected error:", sys.exc_info()[0])
raise
else:
self.lastTimeStampFormat = timeStampFormat
return timeStamp
def makeTimeStamp(timeStamp = None):
'''
    Initialize a timestamp from the provided value, or from now() if None is provided.
:param timeStamp: string or datetime or None
'''
if not timeStamp:
return datetime.now()
if isinstance(timeStamp, datetime):
return copy.copy(timeStamp)
    try:
        makeTimeStamp.parseTimeStamp
    except AttributeError:
        # cache a ParseTimeStamp helper as a function attribute on first use:
        makeTimeStamp.parseTimeStamp = ParseTimeStamp()
    try:
        result = makeTimeStamp.parseTimeStamp.parseTimeStamp(timeStamp)
    except Exception:
        return datetime.now()
    # parseTimeStamp() returns False when it cannot parse; fall back to now() in that case too:
    return result if result else datetime.now()
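# Usage sketch (hedged): the timestamp strings below are examples only.
#
#   parser = ParseTimeStamp()
#   ts = parser.parseTimeStamp('2020-05-21 14:30:15')   # parsed via the SQL format
#   fmt = parser.lastTimeStampFormat                     # '%Y-%m-%d %H:%M:%S'
#
#   ts2 = makeTimeStamp('21 May 2020 2:30 PM')           # falls through to dateutil
#   ts3 = makeTimeStamp()                                # defaults to datetime.now()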
# ALMASim
[](https://arxiv.org/abs/2211.11462)


ALMASim is a package to generate mock observations of HI line galaxies as observed by the Atacama Large Millimetre/Submillimetre Array (ALMA). ALMASim's primary goal is to let users generate simulated datasets on which to test deconvolution and source-detection models. ALMASim is designed to leverage MPI parallel computing on modern HPC clusters to generate thousands of ALMA data cubes. Users are free to set both source and observational parameters such as antenna configuration, bandwidth, and integration time, or to generate data entirely from random configurations and sources.
ALMASim is built upon the CASA PiP Wheels (https://casadocs.readthedocs.io/en/latest/index.html), the MARTINI package (https://github.com/kyleaoman/martini), and the Illustris Python package (https://github.com/illustristng/illustris_python), which lets it generate observations of both high-redshift point-like sources and nearby extended sources in all possible ALMA configurations.
For every observed target, ALMASim generates:
- A sky model .fits cube containing the source without any noise or instrumental effects;
- A dirty .fits cube, i.e. the Fourier inversion of the observed visibilities;
- A measurement set .npy file containing the measured visibilities as a numpy array;
- A parameters.csv file containing the observational and source parameters.
## Installation
<pre><code> pip install ALMASim </code></pre>
If you are interested in simulating extended sources, you need to download and configure the Illustris TNG100-1 simulation folder.

The picture shows an example for Snapshot-99; reproduce this for every snapshot you are interested in. You can find more details at the Illustris TNG official website: https://www.tng-project.org/data/
## Usage
To run the simulation, just navigate to the ALMASim folder and execute
<pre><code> python main.py --option value --option1 value1 value2 </code></pre>
To check the available options, run
<pre><code> python main.py -h </code></pre>
**Happy Simulations**
### Cite us
Michele Delli Veneri, Łukasz Tychoniec, Fabrizia Guglielmetti, Giuseppe Longo, Eric Villard, 3D Detection and Characterisation of ALMA Sources through Deep Learning, Monthly Notices of the Royal Astronomical Society, 2022;, stac3314, https://doi.org/10.1093/mnras/stac3314
@article{10.1093/mnras/stac3314,
author = {Delli Veneri, Michele and Tychoniec, Łukasz and Guglielmetti, Fabrizia and Longo, Giuseppe and Villard, Eric},
title = "{3D Detection and Characterisation of ALMA Sources through Deep Learning}",
journal = {Monthly Notices of the Royal Astronomical Society},
year = {2022},
month = {11},
issn = {0035-8711},
doi = {10.1093/mnras/stac3314},
url = {https://doi.org/10.1093/mnras/stac3314},
note = {stac3314},
eprint = {https://academic.oup.com/mnras/advance-article-pdf/doi/10.1093/mnras/stac3314/47014718/stac3314.pdf},
}
| ALMASim | /ALMASim-1.4.tar.gz/ALMASim-1.4/README.md | README.md |
import os
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
import argparse
from sklearn.model_selection import train_test_split
from natsort import natsorted
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", type=str,
help='The directory containing the sims subdirectory;', default='')
# argparse's type=bool treats any non-empty string as True, so parse the flag explicitly
parser.add_argument('tclean_flag', type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                    help='tCLEAN flag; if set to True, expects to find tclean-cleaned cubes within the sims folder',
                    default=True)
parser.add_argument('train_size', type=float, help='Training size, float between 0 and 1.', default=0.8)
args = parser.parse_args()
data_dir = args.data_dir
tclean_flag = args.tclean_flag
train_size = args.train_size
input_dir = os.path.join(data_dir, 'sims')
train_dir = os.path.join(data_dir, "Train")
test_dir = os.path.join(data_dir, "Test")
valid_dir = os.path.join(data_dir, "Validation")
if not os.path.exists(train_dir):
os.mkdir(train_dir)
if not os.path.exists(test_dir):
os.mkdir(test_dir)
if not os.path.exists(valid_dir):
os.mkdir(valid_dir)
dlist = np.array(natsorted(list([file for file in os.listdir(input_dir) if 'dirty' in file])))
# exclude 'tcleaned' files explicitly, since 'clean' is also a substring of 'tcleaned'
clist = np.array(natsorted(list([file for file in os.listdir(input_dir) if 'clean' in file and 'tcleaned' not in file])))
if tclean_flag is True:
tlist = np.array(natsorted(list([file for file in os.listdir(input_dir) if 'tcleaned' in file])))
params = pd.read_csv(os.path.join(input_dir, "params.csv"))
indexes = np.arange(dlist.shape[0])
train_idxs, test_idxs = train_test_split(indexes, test_size=1 - train_size, random_state=42)
train_idxs, valid_idxs = train_test_split(train_idxs, test_size=0.25, random_state=42)
train_idxs = np.array(natsorted(train_idxs))
test_idxs = np.array(natsorted(test_idxs))
valid_idxs = np.array(natsorted(valid_idxs))
train_params = params[params['ID'].isin(train_idxs)]
valid_params = params[params['ID'].isin(valid_idxs)]
test_params = params[params['ID'].isin(test_idxs)]
train_params.to_csv(os.path.join(train_dir, 'train_params.csv'), index=False)
test_params.to_csv(os.path.join(test_dir, 'test_params.csv'), index=False)
valid_params.to_csv(os.path.join(valid_dir, 'valid_params.csv'), index=False)
print('Splitting fits cubes in Train, Test, and Validation')
for idx in tqdm(indexes):
if idx in train_idxs:
os.system("cp {} {}".format(os.path.join(input_dir, dlist[idx]),
os.path.join(train_dir, dlist[idx])))
os.system("cp {} {}".format(os.path.join(input_dir, clist[idx]),
os.path.join(train_dir, clist[idx])))
if tclean_flag is True:
os.system("cp {} {}".format(os.path.join(input_dir, tlist[idx]),
os.path.join(train_dir, tlist[idx])))
elif idx in valid_idxs:
os.system("cp {} {}".format(os.path.join(input_dir, dlist[idx]),
os.path.join(valid_dir, dlist[idx])))
os.system("cp {} {}".format(os.path.join(input_dir, clist[idx]),
os.path.join(valid_dir, clist[idx])))
if tclean_flag is True:
os.system("cp {} {}".format(os.path.join(input_dir, tlist[idx]),
os.path.join(valid_dir, tlist[idx])))
else:
os.system("cp {} {}".format(os.path.join(input_dir, dlist[idx]),
os.path.join(test_dir, dlist[idx])))
os.system("cp {} {}".format(os.path.join(input_dir, clist[idx]),
os.path.join(test_dir, clist[idx])))
if tclean_flag is True:
os.system("cp {} {}".format(os.path.join(input_dir, tlist[idx]),
os.path.join(test_dir, tlist[idx]))) | ALMASim | /ALMASim-1.4.tar.gz/ALMASim-1.4/utility/split_data.py | split_data.py |
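# Example invocation (hedged; the data path is a placeholder):
#
#   python split_data.py /path/to/data True 0.8
#
# This expects /path/to/data/sims to contain the simulated cubes plus params.csv, and
# creates the Train, Test and Validation folders (with their param files) next to it.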
import numpy as np
import pandas as pd
import argparse
from astropy.io import fits
import os
from photutils.aperture import CircularAnnulus, CircularAperture
from tqdm import tqdm
from radio_beam import Beam
import cv2
import astropy.units as u
def measure_snr(img, box):
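    # SNR is estimated here as mean(source aperture) / std(background annulus), with both
    # apertures centred on the fixed image centre (180, 180) rather than on the box itself.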
y0, x0, y1, x1 = box
xc, yc = 180, 180
r0, r1 = 1.6 * (x1 - x0), 2.6 * (x1 - x0)
r = 0.5 * (x1 - x0)
noise_aperture = CircularAnnulus((xc, yc), r0 / 2, r1 / 2 )
mask = noise_aperture.to_mask(method='center')
source_aperture = CircularAperture((xc, yc), r)
aperture_mask = source_aperture.to_mask()
noise_p = mask.multiply(img)
noise_p = noise_p[mask.data > 0]
source_p = aperture_mask.multiply(img)
source_p = source_p[aperture_mask.data > 0.]
std = np.std(noise_p)
mean = np.mean(source_p)
snr = mean / std
#print('Source Mean: ', mean)
#print('Noise RMS: ', std)
return snr
def generate_noise(rms):
ch, rows, cols = 128, 360, 360
mean = 0
gauss = np.random.normal(mean, rms, (ch, rows, cols))
return gauss
def add_noise(cube, rms):
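    # Per-channel white Gaussian noise is convolved with a Gaussian approximation of the
    # synthesized beam (bmaj/bmin below), so the added noise is spatially correlated.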
bmaj = 0.886 * u.arcsec
bmin = 0.7691 * u.arcsec
noise = generate_noise(rms)
pix_scale = 0.1 * u.arcsec
gk = np.array(Beam(bmaj, bmin).as_kernel(pix_scale))
noisy_cube = np.zeros(cube.shape)
for z in range(cube.shape[0]):
noisy_cube[z] = cube[z] + cv2.filter2D(noise[z], -1, gk)
return noisy_cube
def measure_params(input_dataframe, output_dir, n, rms_noise):
cont, flux, peak, snr = [], [], [], []
for i in tqdm(range(n)):
source_params = input_dataframe.loc[input_dataframe.ID == i]
boxes = np.array(source_params[["y0", "x0", "y1", "x1"]].values)
dirty_cube = fits.getdata(os.path.join(output_dir, "dirty_cube_{}.fits".format(str(i))))[0]
if rms_noise != 0.:
dirty_cube = add_noise(dirty_cube, rms_noise)
hdu = fits.PrimaryHDU(data=dirty_cube.astype(np.float32))
hdu.writeto(os.path.join(output_dir, "dirty_cube_{}.fits".format(str(i))), overwrite=True)
dirty_img = np.sum(dirty_cube, axis=0)
for j, box in enumerate(boxes):
source_param = source_params.iloc[j, :]
z, fwhm_z = int(source_param['z']), int(source_param['fwhm_z'])
y0, x0, y1, x1 = box
source_pixels = dirty_cube[z - fwhm_z: z + fwhm_z, y0: y1, x0: x1]
cont_pixels = np.concatenate((dirty_cube[: z - fwhm_z, y0:y1, x0:x1], dirty_cube[z + fwhm_z: , y0:y1, x0:x1]), axis=0)
cont.append(np.mean(cont_pixels))
flux.append(np.sum(source_pixels))
peak.append(np.max(source_pixels))
snr.append(measure_snr(dirty_img, box))
cont = np.array(cont).astype(np.float32)
flux = np.array(flux).astype(np.float32)
peak = np.array(peak).astype(np.float32)
snr = np.array(snr).astype(np.float32)
input_dataframe['continuum'] = cont
input_dataframe['flux'] = flux
input_dataframe['peak'] = peak
input_dataframe['snr'] = snr
name = os.path.join(output_dir, 'params.csv')
input_dataframe.to_csv(name, index=False)
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', type=str,
help='the directory where the simulated model cubes and the params.csv file are stored')
parser.add_argument('output_dir', type=str,
help='the directory where the simulated cubes are stored')
parser.add_argument('catalogue_name', type=str, help='the name of the .csv file')
parser.add_argument('noise_rms', type=float, help='RMS of the noise to add to the cube on top of the standard noise')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
catalogue_name = args.catalogue_name
rms_noise = args.noise_rms
input_dataframe = pd.read_csv(os.path.join(input_dir, catalogue_name))
n = len(list(os.listdir(input_dir))) - 1
measure_params(input_dataframe, output_dir, n, rms_noise) | ALMASim | /ALMASim-1.4.tar.gz/ALMASim-1.4/utility/generate_gaussian_params.py | generate_gaussian_params.py |
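# Example invocation (hedged; paths are placeholders):
#
#   python generate_gaussian_params.py /path/to/models /path/to/sims params.csv 0.0
#
# This reads the catalogue from the input directory, measures continuum, flux, peak and
# SNR on the dirty cubes (optionally adding extra noise), and writes params.csv to the
# output directory.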
""" snapshot.py: File I/O related to the snapshot files. """
from __future__ import print_function
import numpy as np
import h5py
import six
from os.path import isfile
from .util import partTypeNum
from .groupcat import gcPath, offsetPath
def snapPath(basePath, snapNum, chunkNum=0):
""" Return absolute path to a snapshot HDF5 file (modify as needed). """
snapPath = basePath + '/snapdir_' + str(snapNum).zfill(3) + '/'
filePath1 = snapPath + 'snap_' + str(snapNum).zfill(3) + '.' + str(chunkNum) + '.hdf5'
filePath2 = filePath1.replace('/snap_', '/snapshot_')
if isfile(filePath1):
return filePath1
return filePath2
def getNumPart(header):
""" Calculate number of particles of all types given a snapshot header. """
if 'NumPart_Total_HighWord' not in header:
return header['NumPart_Total'] # new uint64 convention
nTypes = 6
nPart = np.zeros(nTypes, dtype=np.int64)
for j in range(nTypes):
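        # combine the 32-bit low and high words into a single 64-bit particle count: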
nPart[j] = header['NumPart_Total'][j] | (header['NumPart_Total_HighWord'][j] << 32)
return nPart
def loadSubset(basePath, snapNum, partType, fields=None, subset=None, mdi=None, sq=True, float32=False):
""" Load a subset of fields for all particles/cells of a given partType.
If offset and length specified, load only that subset of the partType.
If mdi is specified, must be a list of integers of the same length as fields,
giving for each field the multi-dimensional index (on the second dimension) to load.
For example, fields=['Coordinates', 'Masses'] and mdi=[1, None] returns a 1D array
of y-Coordinates only, together with Masses.
If sq is True, return a numpy array instead of a dict if len(fields)==1.
If float32 is True, load any float64 datatype arrays directly as float32 (save memory). """
result = {}
ptNum = partTypeNum(partType)
gName = "PartType" + str(ptNum)
# make sure fields is not a single element
if isinstance(fields, six.string_types):
fields = [fields]
# load header from first chunk
with h5py.File(snapPath(basePath, snapNum), 'r') as f:
header = dict(f['Header'].attrs.items())
nPart = getNumPart(header)
# decide global read size, starting file chunk, and starting file chunk offset
if subset:
offsetsThisType = subset['offsetType'][ptNum] - subset['snapOffsets'][ptNum, :]
fileNum = np.max(np.where(offsetsThisType >= 0))
fileOff = offsetsThisType[fileNum]
numToRead = subset['lenType'][ptNum]
else:
fileNum = 0
fileOff = 0
numToRead = nPart[ptNum]
result['count'] = numToRead
if not numToRead:
# print('warning: no particles of requested type, empty return.')
return result
# find a chunk with this particle type
i = 1
while gName not in f:
f = h5py.File(snapPath(basePath, snapNum, i), 'r')
i += 1
# if fields not specified, load everything
if not fields:
fields = list(f[gName].keys())
for i, field in enumerate(fields):
# verify existence
if field not in f[gName].keys():
raise Exception("Particle type ["+str(ptNum)+"] does not have field ["+field+"]")
# replace local length with global
shape = list(f[gName][field].shape)
shape[0] = numToRead
# multi-dimensional index slice load
if mdi is not None and mdi[i] is not None:
if len(shape) != 2:
raise Exception("Read error: mdi requested on non-2D field ["+field+"]")
shape = [shape[0]]
# allocate within return dict
dtype = f[gName][field].dtype
if dtype == np.float64 and float32: dtype = np.float32
result[field] = np.zeros(shape, dtype=dtype)
# loop over chunks
wOffset = 0
origNumToRead = numToRead
while numToRead:
f = h5py.File(snapPath(basePath, snapNum, fileNum), 'r')
# no particles of requested type in this file chunk?
if gName not in f:
f.close()
fileNum += 1
fileOff = 0
continue
# set local read length for this file chunk, truncate to be within the local size
numTypeLocal = f['Header'].attrs['NumPart_ThisFile'][ptNum]
numToReadLocal = numToRead
if fileOff + numToReadLocal > numTypeLocal:
numToReadLocal = numTypeLocal - fileOff
#print('['+str(fileNum).rjust(3)+'] off='+str(fileOff)+' read ['+str(numToReadLocal)+\
# '] of ['+str(numTypeLocal)+'] remaining = '+str(numToRead-numToReadLocal))
# loop over each requested field for this particle type
for i, field in enumerate(fields):
# read data local to the current file
if mdi is None or mdi[i] is None:
result[field][wOffset:wOffset+numToReadLocal] = f[gName][field][fileOff:fileOff+numToReadLocal]
else:
result[field][wOffset:wOffset+numToReadLocal] = f[gName][field][fileOff:fileOff+numToReadLocal, mdi[i]]
wOffset += numToReadLocal
numToRead -= numToReadLocal
fileNum += 1
fileOff = 0 # start at beginning of all file chunks other than the first
f.close()
# verify we read the correct number
if origNumToRead != wOffset:
raise Exception("Read ["+str(wOffset)+"] particles, but was expecting ["+str(origNumToRead)+"]")
# only a single field? then return the array instead of a single item dict
if sq and len(fields) == 1:
return result[fields[0]]
return result
def getSnapOffsets(basePath, snapNum, id, type):
""" Compute offsets within snapshot for a particular group/subgroup. """
r = {}
# old or new format
if 'fof_subhalo' in gcPath(basePath, snapNum):
# use separate 'offsets_nnn.hdf5' files
with h5py.File(offsetPath(basePath, snapNum), 'r') as f:
groupFileOffsets = f['FileOffsets/'+type][()]
r['snapOffsets'] = np.transpose(f['FileOffsets/SnapByType'][()]) # consistency
else:
# load groupcat chunk offsets from header of first file
with h5py.File(gcPath(basePath, snapNum), 'r') as f:
groupFileOffsets = f['Header'].attrs['FileOffsets_'+type]
r['snapOffsets'] = f['Header'].attrs['FileOffsets_Snap']
# calculate target groups file chunk which contains this id
groupFileOffsets = int(id) - groupFileOffsets
fileNum = np.max(np.where(groupFileOffsets >= 0))
groupOffset = groupFileOffsets[fileNum]
# load the length (by type) of this group/subgroup from the group catalog
with h5py.File(gcPath(basePath, snapNum, fileNum), 'r') as f:
r['lenType'] = f[type][type+'LenType'][groupOffset, :]
# old or new format: load the offset (by type) of this group/subgroup within the snapshot
if 'fof_subhalo' in gcPath(basePath, snapNum):
with h5py.File(offsetPath(basePath, snapNum), 'r') as f:
r['offsetType'] = f[type+'/SnapByType'][id, :]
else:
with h5py.File(gcPath(basePath, snapNum, fileNum), 'r') as f:
r['offsetType'] = f['Offsets'][type+'_SnapByType'][groupOffset, :]
return r
def loadSubhalo(basePath, snapNum, id, partType, fields=None):
""" Load all particles/cells of one type for a specific subhalo
(optionally restricted to a subset fields). """
# load subhalo length, compute offset, call loadSubset
subset = getSnapOffsets(basePath, snapNum, id, "Subhalo")
return loadSubset(basePath, snapNum, partType, fields, subset=subset)
def loadHalo(basePath, snapNum, id, partType, fields=None):
""" Load all particles/cells of one type for a specific halo
(optionally restricted to a subset fields). """
# load halo length, compute offset, call loadSubset
subset = getSnapOffsets(basePath, snapNum, id, "Group")
return loadSubset(basePath, snapNum, partType, fields, subset=subset) | ALMASim | /ALMASim-1.4.tar.gz/ALMASim-1.4/submodules/illustris_python/snapshot.py | snapshot.py |
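# Usage sketch (hedged): basePath, snapshot number and IDs below are placeholders; see the
# Illustris/IllustrisTNG data-release documentation for real values.
#
#   basePath = './TNG100-1/output'
#   gas = loadSubhalo(basePath, 99, id=0, partType='gas', fields=['Coordinates', 'Masses'])
#   dm  = loadHalo(basePath, 99, id=0, partType='dm', fields=['Coordinates'])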