code (string, 501 – 5.19M chars) | package (string, 2 – 81 chars) | path (string, 9 – 304 chars) | filename (string, 4 – 145 chars)
---|---|---|---|
!function(a,b,c,d){function e(b,c){this.element=a(b),this.settings=a.extend({},w,c),this._defaults=w,this._name=m,this.init()}function f(b){v&&(b.element.addClass("navbar-hidden").animate({top:-b.element.height()},{queue:!1,duration:b.settings.animationDuration}),a(".dropdown.open .dropdown-toggle",b.element).dropdown("toggle"),v=!1)}function g(a){v||(a.element.removeClass("navbar-hidden").animate({top:0},{queue:!1,duration:a.settings.animationDuration}),v=!0)}function h(a){var b=n.scrollTop(),c=b-t;if(t=b,0>c){if(v)return;(a.settings.showOnUpscroll||l>=b)&&g(a)}else if(c>0){if(!v)return void(a.settings.showOnBottom&&b+u===o.height()&&g(a));b>=l&&f(a)}}function i(a){a.settings.disableAutohide||(s=(new Date).getTime(),h(a))}function j(a){o.on("scroll."+m,function(){(new Date).getTime()-s>r?i(a):(clearTimeout(p),p=setTimeout(function(){i(a)},r))}),n.on("resize."+m,function(){clearTimeout(q),q=setTimeout(function(){u=n.height()},r)})}function k(){o.off("."+m),n.off("."+m)}var l,m="autoHidingNavbar",n=a(b),o=a(c),p=null,q=null,r=70,s=0,t=null,u=n.height(),v=!0,w={disableAutohide:!1,showOnUpscroll:!0,showOnBottom:!0,hideOffset:"auto",animationDuration:200};e.prototype={init:function(){return this.elements={navbar:this.element},this.setDisableAutohide(this.settings.disableAutohide),this.setShowOnUpscroll(this.settings.showOnUpscroll),this.setShowOnBottom(this.settings.showOnBottom),this.setHideOffset(this.settings.hideOffset),this.setAnimationDuration(this.settings.animationDuration),l="auto"===this.settings.hideOffset?this.element.height():this.settings.hideOffset,j(this),this.element},setDisableAutohide:function(a){return this.settings.disableAutohide=a,this.element},setShowOnUpscroll:function(a){return this.settings.showOnUpscroll=a,this.element},setShowOnBottom:function(a){return this.settings.showOnBottom=a,this.element},setHideOffset:function(a){return this.settings.hideOffset=a,this.element},setAnimationDuration:function(a){return this.settings.animationDuration=a,this.element},show:function(){return g(this),this.element},hide:function(){return f(this),this.element},destroy:function(){return k(this),g(this),a.data(this,"plugin_"+m,null),this.element}},a.fn[m]=function(b){var c=arguments;if(b===d||"object"==typeof b)return this.each(function(){a.data(this,"plugin_"+m)||a.data(this,"plugin_"+m,new e(this,b))});if("string"==typeof b&&"_"!==b[0]&&"init"!==b){var f;return this.each(function(){var d=a.data(this,"plugin_"+m);d instanceof e&&"function"==typeof d[b]&&(f=d[b].apply(d,Array.prototype.slice.call(c,1)))}),f!==d?f:this}}}(jQuery,window,document); | ASMO | /ASMO-0.4.tar.gz/ASMO-0.4/packages/asmo/client/js/jquery.bootstrap-autohidingnavbar.min.js | jquery.bootstrap-autohidingnavbar.min.js |
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import numpy as np
from datetime import date
import csv
import js
import json
import asyncio
import io
from io import StringIO
from js import Blob, document, window, dfd
from pyodide.ffi import create_proxy, to_js
import datetime
import os
async def DATA_CLEANER(FILE):
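# Parse the uploaded CSV text into rows: row 0 is treated as the header, the remaining rows become the DataFrame body.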
DATA = [row for row in csv.reader(FILE.splitlines(), delimiter=',')]
df = pd.DataFrame(list(DATA)[1:], columns = list(DATA)[0:1][0])
print(list(DATA)[0:1][0])
#VARIABLES HOLDING THE COLUMN-NAME STRINGS FROM THE RANKING DETAIL FILE
KW = 'Keyword'
VSBY = 'Visibility'
SE = "SE"
NOTE = "NOTE"
RP = 'Ranking page(s)'
_URL_ = "URL"
URL_RANK = "Google URL Found"
GMHUF = "Google Mobile HOU URL Found"
GHUF= "Google HOU URL Found"
BUUF = "Bing US URL Found"
YUF = "Yahoo! URL Found"
URL_FOUND = "Google URL Found"
LP_1= "Local Pack (1)"
GOD_SERP = "Google HOU SERP Features"
GOM_SERP = "Google Mobile HOU SERP Features"
BNG_SERP = "Bing US SERP Features"
YAH_SERP = "Yahoo! SERP Features"
NEW_GOD_SERP = "Google Houston SERP Features"
NEW_GOM_SERP = "Houston MOB SERP Features"
GMH_SERP = 'Google Mob HOU SERP Features'
GHOU_SERP = 'Google Houston SERP Features'
ARRAY_GOOGLE = []
ARRAY_GOOGLE_MOBILE = []
ARRAY_BING = []
ARRAY_YAHOO = []
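# Helper: returns True if `word` occurs as a substring of any entry in `string_list`.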
def check_word_in_list(word, string_list):
for string in string_list:
if word in string:
return True
return False
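# COLUMN_NAME scans the header row and records, in globals, the engine-specific column names
# (Previous / Difference / Rank / URL / SERP) for Google desktop, Google mobile, Yahoo and Bing.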
def COLUMN_NAME():
for x in range(len(list(DATA)[0:1][0])):
if "Google" in list(DATA)[0:1][0][x]:
if "mobile" in list(DATA)[0:1][0][x] or "Mobile" in list(DATA)[0:1][0][x] or "MOB" in list(DATA)[0:1][0][x] or "Mob" in list(DATA)[0:1][0][x] or "mob" in list(DATA)[0:1][0][x]:
if "previous" in list(DATA)[0:1][0][x] or "Previous" in list(DATA)[0:1][0][x]:
global GOOGLE_MOBILE_PREVIOUS
GOOGLE_MOBILE_PREVIOUS = list(DATA)[0:1][0][x]
else:
if "Difference" in list(DATA)[0:1][0][x] or "difference" in list(DATA)[0:1][0][x]:
global GOOGLE_MOBILE_DIFFERENCE
GOOGLE_MOBILE_DIFFERENCE = list(DATA)[0:1][0][x]
else:
if "Rank" in list(DATA)[0:1][0][x]:
global GOOGLE_MOBILE_RANK
GOOGLE_MOBILE_RANK = list(DATA)[0:1][0][x]
else:
if "URL" in list(DATA)[0:1][0][x]:
global GOOGLE_MOBILE_URL
GOOGLE_MOBILE_URL = list(DATA)[0:1][0][x]
else:
if "SERP" in list(DATA)[0:1][0][x]:
global GOOGLE_MOBILE_SERP
GOOGLE_MOBILE_SERP = list(DATA)[0:1][0][x]
else:
pass
else:
if "previous" in list(DATA)[0:1][0][x] or "Previous" in list(DATA)[0:1][0][x]:
global GOOGLE_PREVIOUS
GOOGLE_PREVIOUS = list(DATA)[0:1][0][x]
else:
if "Difference" in list(DATA)[0:1][0][x] or "difference" in list(DATA)[0:1][0][x]:
global GOOGLE_DIFFERENCE
GOOGLE_DIFFERENCE = list(DATA)[0:1][0][x]
else:
if "Rank" in list(DATA)[0:1][0][x]:
global GOOGLE_RANK
GOOGLE_RANK = list(DATA)[0:1][0][x]
else:
if "URL" in list(DATA)[0:1][0][x]:
global GOOGLE_URL
GOOGLE_URL = list(DATA)[0:1][0][x]
else:
if "SERP" in list(DATA)[0:1][0][x]:
global GOOGLE_SERP
GOOGLE_SERP = list(DATA)[0:1][0][x]
else:
pass
else:
if "Houston MOB" in list(DATA)[0:1][0][x]:
if "previous" in list(DATA)[0:1][0][x] or "Previous" in list(DATA)[0:1][0][x]:
GOOGLE_MOBILE_PREVIOUS = list(DATA)[0:1][0][x]
else:
if "Difference" in list(DATA)[0:1][0][x] or "difference" in list(DATA)[0:1][0][x]:
GOOGLE_MOBILE_DIFFERENCE = list(DATA)[0:1][0][x]
else:
if "Rank" in list(DATA)[0:1][0][x]:
GOOGLE_MOBILE_RANK = list(DATA)[0:1][0][x]
else:
if "URL" in list(DATA)[0:1][0][x]:
GOOGLE_MOBILE_URL = list(DATA)[0:1][0][x]
else:
if "SERP" in list(DATA)[0:1][0][x]:
GOOGLE_MOBILE_SERP = list(DATA)[0:1][0][x]
else:
pass
else:
if "Yahoo" in list(DATA)[0:1][0][x] or "yahoo" in list(DATA)[0:1][0][x]:
if "previous" in list(DATA)[0:1][0][x] or "Previous" in list(DATA)[0:1][0][x]:
global YAHOO_PREVIOUS
YAHOO_PREVIOUS = list(DATA)[0:1][0][x]
else:
if "Difference" in list(DATA)[0:1][0][x] or "difference" in list(DATA)[0:1][0][x]:
global YAHOO_DIFFERENCE
YAHOO_DIFFERENCE = list(DATA)[0:1][0][x]
else:
if "Rank" in list(DATA)[0:1][0][x]:
global YAHOO_RANK
YAHOO_RANK = list(DATA)[0:1][0][x]
else:
if "URL" in list(DATA)[0:1][0][x]:
global YAHOO_URL
YAHOO_URL = list(DATA)[0:1][0][x]
else:
if "SERP" in list(DATA)[0:1][0][x]:
global YAHOO_SERP
YAHOO_SERP = list(DATA)[0:1][0][x]
else:
pass
else:
if "Bing" in list(DATA)[0:1][0][x] or "bing" in list(DATA)[0:1][0][x]:
if "previous" in list(DATA)[0:1][0][x] or "Previous" in list(DATA)[0:1][0][x]:
global BING_PREVIOUS
BING_PREVIOUS = list(DATA)[0:1][0][x]
else:
if "Difference" in list(DATA)[0:1][0][x] or "difference" in list(DATA)[0:1][0][x]:
global BING_DIFFERENCE
BING_DIFFERENCE = list(DATA)[0:1][0][x]
else:
if "Rank" in list(DATA)[0:1][0][x]:
global BING_RANK
BING_RANK = list(DATA)[0:1][0][x]
else:
if "URL" in list(DATA)[0:1][0][x]:
global BING_URL
BING_URL = list(DATA)[0:1][0][x]
else:
if "SERP" in list(DATA)[0:1][0][x]:
global BING_SERP
BING_SERP = list(DATA)[0:1][0][x]
else:
pass
else:
pass
return GOOGLE_MOBILE_PREVIOUS, GOOGLE_MOBILE_DIFFERENCE, GOOGLE_MOBILE_RANK, GOOGLE_PREVIOUS, GOOGLE_DIFFERENCE, GOOGLE_RANK, YAHOO_PREVIOUS, YAHOO_DIFFERENCE, YAHOO_RANK, BING_RANK, BING_DIFFERENCE, BING_PREVIOUS;
COLUMN_NAME()
GOD_F = {KW: df[KW], GOOGLE_URL: df[GOOGLE_URL], GOOGLE_RANK: df[GOOGLE_RANK], GOOGLE_PREVIOUS: df[GOOGLE_PREVIOUS], GOOGLE_DIFFERENCE: df[GOOGLE_DIFFERENCE]}
GOM_F = {KW: df[KW],GOOGLE_MOBILE_URL: df[GOOGLE_MOBILE_URL], GOOGLE_MOBILE_PREVIOUS: df[GOOGLE_MOBILE_PREVIOUS], GOOGLE_MOBILE_RANK: df[GOOGLE_MOBILE_RANK], GOOGLE_MOBILE_DIFFERENCE: df[GOOGLE_MOBILE_DIFFERENCE]}
BNG_F = {KW: df[KW],BING_URL: df[BING_URL], BING_RANK: df[BING_RANK], BING_DIFFERENCE: df[BING_DIFFERENCE], BING_PREVIOUS: df[BING_PREVIOUS]}
YAH_F = {KW: df[KW],YAHOO_URL: df[YAHOO_URL], YAHOO_RANK: df[YAHOO_RANK],YAHOO_PREVIOUS: df[YAHOO_PREVIOUS], YAHOO_DIFFERENCE: df[YAHOO_DIFFERENCE]}
GOD_H = pd.DataFrame(GOD_F)
GOM_H = pd.DataFrame(GOM_F)
BNG_H = pd.DataFrame(BNG_F)
YAH_H = pd.DataFrame(YAH_F)
GOD_CLEAN_1 = GOD_H[GOD_H[GOOGLE_DIFFERENCE] != "Stays out"]
GOM_CLEAN_1 = GOM_H[GOM_H[GOOGLE_MOBILE_DIFFERENCE] != "Stays out"]
BNG_CLEAN_1 = BNG_H[BNG_H[BING_DIFFERENCE] != "Stays out"]
YAH_CLEAN_1 = YAH_H[YAH_H[YAHOO_DIFFERENCE] != "Stays out"]
GOD_CLEAN_2 = GOD_CLEAN_1[GOD_CLEAN_1[GOOGLE_DIFFERENCE] != "Dropped"]
GOM_CLEAN_2 = GOM_CLEAN_1[GOM_CLEAN_1[GOOGLE_MOBILE_DIFFERENCE] != "Dropped"]
BNG_CLEAN_2 = BNG_CLEAN_1[BNG_CLEAN_1[BING_DIFFERENCE] != "Dropped"]
YAH_CLEAN_2 = YAH_CLEAN_1[YAH_CLEAN_1[YAHOO_DIFFERENCE] != "Dropped"]
ArrayGOOGLE_RANK = pd.array(GOD_CLEAN_2[GOOGLE_RANK])
ArrayGOOGLE_DIFFERENCE = pd.array(GOD_CLEAN_2[GOOGLE_DIFFERENCE])
ArrayGOOGLE_MOBILE_RANK = pd.array(GOM_CLEAN_2[GOOGLE_MOBILE_RANK])
ArrayGOOGLE_MOBILE_DIFFERENCE = pd.array(GOM_CLEAN_2[GOOGLE_MOBILE_DIFFERENCE])
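# The small helpers below reconstruct the previous-period rank from the current rank and the difference value.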
def NEG_Y_FUNCTION(X, Y, ARRAY):
Y = int(Y)
X = int(X)
z = Y + X
ARRAY.append(z)
def X_GRT_Y_LOGIC_FUNCTION(X, Y, ARRAY):
Y = int(Y)
X = int(X)
if X > Y or X == Y:
z = X + Y
ARRAY.append(z)
else:
Y = str(Y)
if "-" in Y:
Y = int(Y)
ARRAY.append(Y)
else:
z = X + int(Y)
ARRAY.append(z)
def Y_FUNCTION(X, Y, ARRAY):
Y = int(Y)
if Y >= 0:
X_GRT_Y_LOGIC_FUNCTION(X, Y, ARRAY)
else:
NEG_Y_FUNCTION(X, Y, ARRAY)
def LOGIC_X_Y_STR(X, Y, RA):
if "+" in str(Y):
POS_Y = int(str(Y)[1])
X_GRT_Y_LOGIC_FUNCTION(X, POS_Y, RA)
else:
POS_X = int(X)
Y_FUNCTION(POS_X, Y, RA)
def STANDARD_XY_FUNCTION(AR, AD, RA):
if "(" in str(AR) or str(AD):
if "(" in str(AD):
NEW_AD = str(AD)
RANK_Y = int(NEW_AD[0])
if "(" in str(AR):
NEW_AR = str(AR)
RANK_X = int(NEW_AR[0])
Y_FUNCTION(RANK_X, RANK_Y, RA)
else:
Y_FUNCTION(int(AR), RANK_Y, RA)
else:
if "(" in str(AR):
NEW_AR = str(AR)
RANK_X = int(NEW_AR[0])
y = int(AD)
Y_FUNCTION(RANK_X, y, RA)
else:
y = int(AD)
Y_FUNCTION(AR, y, RA)
else:
DIF_Y = int(AD)
LOGIC_X_Y_STR(AR, DIF_Y, RA)
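# REMOVE_BARS_FUNCTION strips parenthesised annotations from a rank value, keeping only its first character.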
def REMOVE_BARS_FUNCTION(ARRAY_R, X, R_A):
if "(" in str(ARRAY_R[X]):
NEW_AR = str(ARRAY_R[X])
RANK_X = NEW_AR[0]
R_A.append(RANK_X)
else:
R_A.append(ARRAY_R[X])
def BAR_RMV_ARRAY_OUTPUT (ARRAY_RANK):
RankArray = []
for x in range(len(ARRAY_RANK)):
REMOVE_BARS_FUNCTION(ARRAY_RANK, x, RankArray)
return RankArray
def NOT_A_FLOAT_FUNCTION(AR, AD, RA):
if len(AD) > 3 or pd.isna(AD) == True or str(AD) == '':
if "(" in str(AD):
NEW_AD = str(AD)
RANK_Y = int(NEW_AD[0])
STANDARD_XY_FUNCTION(AR, RANK_Y, RA)
else:
if "N" in str(AR):
RA.append(30)
else:
RA.append(30)
else:
if "N" in str(AR):
RA.append(30)
else:
STANDARD_XY_FUNCTION(AR, AD, RA)
#def DEL_STR (AR, AD, RA):
#if len(NEW_AR) > 3 == True:
def ITS_A_FLOAT_FUNCTION(AR, AD, RA):
if pd.isna(AD) == True:
if "N" in str(AR):
RA.append(30)
else:
RA.append(30)
else:
STANDARD_XY_FUNCTION(AR, AD, RA)
##ARRAY_OUTPUT IS THE CONCLUDING FUNCTION THAT INTEGRATES THE LOGIC OF ALL THE FUNCTIONS ABOVE
##ARRAY_DIF TAKES IN THE ARRAY HOLDING THE DIFFERENCE COLUMN VALUES
##ARRAY_RANK TAKES IN THE ARRAY HOLDING THE RANK COLUMN VALUES
def ARRAY_OUTPUT (ARRAY_DIF, ARRAY_RANK):
## CREATES A BLANK LIST TO APPEND NEW VALUES TO
RankArray = []
##A FOR LOOP ITERATES OVER EVERY INDEX IN THE RANGE OF THE LENGTH
##OF THE DIFFERENCE COLUMN ARRAY (ARRAY_DIF)
for x in range(len(ARRAY_DIF)):
##IF THE ARRAY'S VALUE AT THIS INDEX POSITION IS A FLOAT, EXECUTE THE FUNCTION
#CALLED ITS_A_FLOAT_FUNCTION
if isinstance(ARRAY_DIF[x], float) == True:
ITS_A_FLOAT_FUNCTION(ARRAY_RANK[x], ARRAY_DIF[x], RankArray)
##IF THE ARRAY'S VALUE AT THIS INDEX POSITION IS
#NOT A FLOAT, EXECUTE THE FUNCTION
#CALLED NOT_A_FLOAT_FUNCTION
else:
NOT_A_FLOAT_FUNCTION(ARRAY_RANK[x], ARRAY_DIF[x], RankArray)
##RETURNS THE RESULTING LIST SO IT CAN BE STORED ELSEWHERE
##FOR FURTHER DATA MANIPULATION
return RankArray
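# The ENTER_* variants below mirror the logic above and are used by CHANGE_ENTER_FUNCTION to rebuild the difference column.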
def ENTER_NEG_Y_FUNCTION(X, Y, ARRAY):
Y = -Y
ARRAY.append(Y)
def ENTER_X_GRT_Y_LOGIC_FUNCTION(X, Y, ARRAY):
ARRAY.append(Y)
def ENTER_Y_FUNCTION(X, Y, ARRAY):
Y = int(Y)
if Y >= 0:
ENTER_X_GRT_Y_LOGIC_FUNCTION(X, Y, ARRAY)
else:
ENTER_NEG_Y_FUNCTION(X, Y, ARRAY)
def ENTER_LOGIC_X_Y_STR(ARRAY, Y, RA):
Y = str(Y)
if "+" in str(Y):
POS_Y = int(str(Y)[1])
ENTER_X_GRT_Y_LOGIC_FUNCTION(ARRAY, POS_Y, RA)
else:
if "N" in ARRAY:
RA.append(30)
else:
POS_X = int(ARRAY)
ENTER_Y_FUNCTION(POS_X, Y, RA)
def ENTER_STANDARD_XY_FUNCTION(AR, AD, RA):
if "(" in str(AR) or str(AD):
if "(" in str(AD):
NEW_AD = str(AD)
RANK_Y = int(NEW_AD[0])
if "(" in str(AR):
NEW_AR = str(AR)
RANK_X = int(NEW_AR[0])
ENTER_Y_FUNCTION(RANK_X, RANK_Y, RA)
else:
ENTER_Y_FUNCTION(int(AR), RANK_Y, RA)
else:
if "(" in str(AR):
NEW_AR = str(AR)
RANK_X = int(NEW_AR[0])
y = int(AD)
ENTER_Y_FUNCTION(RANK_X, y, RA)
else:
ENTER_Y_FUNCTION(AR, AD, RA)
else:
DIF_Y = int(AD)
ENTER_LOGIC_X_Y_STR(AR, DIF_Y, RA)
#####
def ENTER_NOT_A_FLOAT_FUNCTION(AR, AD, RA):
if len(AD) > 3 or pd.isna(AD) == True or str(AD) == '':
if "(" in str(AR) or "(" in str(AD):
ENTER_STANDARD_XY_FUNCTION(AR, AD, RA)
else:
if "N" in str(AR):
RA.append(30)
else:
if str(AD) == '':
RA.append(30)
else:
RA.append(30-int(AR))
else:
ENTER_STANDARD_XY_FUNCTION(AR, AD, RA)
def ENTER_ITS_A_FLOAT_FUNCTION(AR, AD, RA):
if pd.isna(AD) == True:
if "N" in str(AR):
RA.append(30)
else:
RA.append(30-int(AR))
else:
ENTER_STANDARD_XY_FUNCTION(AR, AD, RA)
def CHANGE_ENTER_FUNCTION(AR, AD, AP):
RankArray = []
for x in range(len(AD)):
if isinstance(AD[x], float) == True:
if pd.isna(AD[x]) == True or "E" in str(AD[x]):
if len(str(AR[x])) > 3 or pd.isna(AR[x]) == True:
if "E" in str(AD[x]):
ARX = int(AR[x])
RankArray.append(30-AR[x])
else:
ENTER_ITS_A_FLOAT_FUNCTION(AR[x], AD[x], RankArray)
else:
ENTER_ITS_A_FLOAT_FUNCTION(AR[x], AD[x], RankArray)
else:
ENTER_ITS_A_FLOAT_FUNCTION(AR[x], AD[x], RankArray)
else:
if len(AD[x]) > 3 or pd.isna(AD[x]) == True or "E" in str(AD[x]):
if len(str(AR[x])) > 3 or pd.isna(AR[x]) == True or "E" in str(AD[x]):
if "E" in str(AD[x]):
ARX = int(AR[x])
RankArray.append(30-ARX)
else:
ENTER_NOT_A_FLOAT_FUNCTION(AR[x], AD[x], RankArray)
else:
ENTER_NOT_A_FLOAT_FUNCTION(AR[x], AD[x], RankArray)
else:
ENTER_NOT_A_FLOAT_FUNCTION(AR[x], AD[x], RankArray)
return RankArray
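# NOTE_Y_FUNCTION / CHANGE_NOTE_FUNCTION label each keyword UP, DOWN, EQ or NEW by comparing the current rank with the reconstructed previous rank.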
def NOTE_Y_FUNCTION(X, Y, ARRAY):
X = int(X)
Y = int(Y)
if X < Y:
ARRAY.append("UP")
if X > Y:
ARRAY.append("DOWN")
if X == Y:
ARRAY.append("EQ")
def CHANGE_NOTE_FUNCTION(AR, AP):
RankArray = []
for x in range(len(AP)):
if AP[x] == 30 or len(AR[x]) > 3 or len(str(AP[x])) > 3:
RankArray.append('NEW')
else:
ARX = int(AR[x])
NOTE_Y_FUNCTION(ARX, AP[x], RankArray)
return RankArray
Array_GOOGLE_RANK = BAR_RMV_ARRAY_OUTPUT(ArrayGOOGLE_RANK)
GPD = ARRAY_OUTPUT(ArrayGOOGLE_DIFFERENCE, Array_GOOGLE_RANK)
Array_GOOGLE_MOBILE_RANK = BAR_RMV_ARRAY_OUTPUT(ArrayGOOGLE_MOBILE_RANK)
GOOGLE_MOBILE_PREVIOUSDF = ARRAY_OUTPUT(ArrayGOOGLE_MOBILE_DIFFERENCE, Array_GOOGLE_MOBILE_RANK)
ArrayBING_DIFFERENCE = pd.array(BNG_CLEAN_2[BING_DIFFERENCE])
ArrayBING_RANK = pd.array(BNG_CLEAN_2[BING_RANK])
Array_BING_RANK = BAR_RMV_ARRAY_OUTPUT(ArrayBING_RANK)
Array_BING_DIFFERENCE = BAR_RMV_ARRAY_OUTPUT(ArrayBING_DIFFERENCE)
BING_DIFFERENCEF = ARRAY_OUTPUT(Array_BING_DIFFERENCE, Array_BING_RANK)
ArrayYAHOO_DIFFERENCE = pd.array(YAH_CLEAN_2[YAHOO_DIFFERENCE])
ArrayYAHOO_RANK = pd.array(YAH_CLEAN_2[YAHOO_RANK])
Array_YAHOO_RANK = BAR_RMV_ARRAY_OUTPUT(ArrayYAHOO_RANK)
YAHDF = ARRAY_OUTPUT(ArrayYAHOO_DIFFERENCE, Array_YAHOO_RANK)
GODIF_NEW = CHANGE_ENTER_FUNCTION(Array_GOOGLE_RANK, ArrayGOOGLE_DIFFERENCE, GPD)
GOMDIF_NEW = CHANGE_ENTER_FUNCTION(Array_GOOGLE_MOBILE_RANK, ArrayGOOGLE_MOBILE_DIFFERENCE, GOOGLE_MOBILE_PREVIOUSDF)
BNGDIF_NEW = CHANGE_ENTER_FUNCTION(Array_BING_RANK, ArrayBING_DIFFERENCE, BING_DIFFERENCEF)
YAHOO_DIFFERENCE_NEW = CHANGE_ENTER_FUNCTION(Array_YAHOO_RANK, ArrayYAHOO_DIFFERENCE, YAHDF)
GOD_NOTE = CHANGE_NOTE_FUNCTION(Array_GOOGLE_RANK, GPD)
GOM_NOTE = CHANGE_NOTE_FUNCTION(Array_GOOGLE_MOBILE_RANK, GOOGLE_MOBILE_PREVIOUSDF)
BNG_NOTE = CHANGE_NOTE_FUNCTION(Array_BING_RANK, BING_DIFFERENCEF)
YAH_NOTE = CHANGE_NOTE_FUNCTION(Array_YAHOO_RANK, YAHDF)
GOD = {KW: GOD_CLEAN_2[KW],RP: GOD_CLEAN_2[GOOGLE_URL], GOOGLE_RANK: Array_GOOGLE_RANK,
GOOGLE_PREVIOUS: GPD, GOOGLE_DIFFERENCE: GODIF_NEW, NOTE: GOD_NOTE}
GOM = {KW: GOM_CLEAN_2[KW], RP: GOM_CLEAN_2[GOOGLE_MOBILE_URL], GOOGLE_MOBILE_RANK: Array_GOOGLE_MOBILE_RANK,
GOOGLE_MOBILE_PREVIOUS: GOOGLE_MOBILE_PREVIOUSDF, GOOGLE_MOBILE_DIFFERENCE: GOMDIF_NEW, NOTE: GOM_NOTE}
BNG = {KW: BNG_CLEAN_2[KW], RP: BNG_CLEAN_2[BING_URL], BING_RANK: Array_BING_RANK,
BING_PREVIOUS: BING_DIFFERENCEF, BING_DIFFERENCE: BNGDIF_NEW, NOTE: BNG_NOTE}
YAH = {KW: YAH_CLEAN_2[KW],RP: YAH_CLEAN_2[YAHOO_URL], YAHOO_RANK: Array_YAHOO_RANK,
YAHOO_PREVIOUS: YAHDF, YAHOO_DIFFERENCE: YAHOO_DIFFERENCE_NEW, NOTE: YAH_NOTE}
GOD_DF = pd.DataFrame(data=GOD)
GOD_NEW = pd.DataFrame(data=GOD_DF[GOD_DF[GOOGLE_RANK] != "Not in top 30"])
GOD_NEW = pd.DataFrame(data=GOD_NEW[GOD_NEW[GOOGLE_RANK] != "Not in top 50"])
GOD_LEN = len(GOD_NEW)
GOD_1 = pd.DataFrame(data=GOD_NEW[GOD_NEW[GOOGLE_RANK] == "1"])
GOD_NEW_NOTE = pd.DataFrame(data=GOD_NEW[GOD_NEW[NOTE] == "NEW"])
GOD_NOTE = len(GOD_NEW_NOTE)
GOD_UP_NOTE = pd.DataFrame(data=GOD_NEW[GOD_NEW[NOTE] == "UP"])
GOD_UP = len(GOD_UP_NOTE)
GOD_DOWN_NOTE = pd.DataFrame(data=GOD_NEW[GOD_NEW[NOTE] == "DOWN"])
GOD_DOWN = len(GOD_DOWN_NOTE)
GOD_EQ_NOTE = pd.DataFrame(data=GOD_NEW[GOD_NEW[NOTE] == "EQ"])
GOD_EQ = len(GOD_EQ_NOTE)
GOD_LP_LEN = pd.DataFrame(data=df[GOOGLE_SERP])
GOD_LP = len(GOD_LP_LEN)
GOD_1_LEN = len(GOD_1)
LP_GOD_ = []
for x in range(len(df)):
if LP_1 in str(GOD_LP_LEN[GOOGLE_SERP][x]):
LP_GOD_.append(GOD_LP_LEN[GOOGLE_SERP][x])
else:
False
TOP_5GOD = []
GOD_FIRST_PAGE = []
GOD_TWO_PAGE = []
LP_GOD = pd.DataFrame()
for x in range(5):
TOP_5GOD.append(len(GOD_NEW[GOD_NEW[GOOGLE_RANK] == str(x+1)]))
for x in range(10):
GOD_NEW[GOOGLE_RANK]
GOD_FIRST = len(pd.DataFrame(data=GOD_NEW[GOD_NEW[GOOGLE_RANK] == str(x+1)]))
GOD_FIRST_PAGE.append(GOD_FIRST)
for x in range(20):
GOD_TWO = len(pd.DataFrame(data=GOD_NEW[GOD_NEW[GOOGLE_RANK] == str(x+1)]))
GOD_TWO_PAGE.append(GOD_TWO)
GOD_5_TOP = pd.DataFrame(data=TOP_5GOD)
GOD5_TOP = GOD_5_TOP.sum()[0]
GOD_FIRST_PAGE_TOP = pd.DataFrame(data=GOD_FIRST_PAGE)
GODFIRST_TOP = GOD_FIRST_PAGE_TOP.sum()[0]
GOD_TWO_PAGE_TOP = pd.DataFrame(data=GOD_TWO_PAGE)
GODTWO_TOP= GOD_TWO_PAGE_TOP.sum()[0]
GOM_DF = pd.DataFrame(data=GOM)
TOP_5GOM = []
GOM_FIRST_PAGE = []
GOM_TWO_PAGE = []
LP_GOM = []
GOM_NEW = pd.DataFrame(data=GOM_DF[GOM_DF[GOOGLE_MOBILE_RANK] != "Not in top 30"])
GOM_NEW = pd.DataFrame(data=GOM_NEW[GOM_NEW[GOOGLE_MOBILE_RANK] != "Not in top 50"])
GOM_1 = pd.DataFrame(data=GOM_NEW[GOM_NEW[GOOGLE_MOBILE_RANK] == "1"])
for x in range(5):
GOM_5 = len(pd.DataFrame(data=GOM_NEW[GOM_NEW[GOOGLE_MOBILE_RANK] == str(x+1)]))
TOP_5GOM.append(GOM_5)
for x in range(10):
GOM_FIRST = len(pd.DataFrame(data=GOM_NEW[GOM_NEW[GOOGLE_MOBILE_RANK] == str(x+1)]))
GOM_FIRST_PAGE.append(GOM_FIRST)
for x in range(20):
GOM_TWO = len(pd.DataFrame(data=GOM_NEW[GOM_NEW[GOOGLE_MOBILE_RANK] == str(x+1)]))
GOM_TWO_PAGE.append(GOM_TWO)
GOM_LEN = len(GOM_DF)
GOM_NEW_NOTE = pd.DataFrame(data=GOM_NEW[GOM_NEW[NOTE] == "NEW"])
GOM_NOTE = len(GOM_NEW_NOTE)
GOM_UP_NOTE = pd.DataFrame(data=GOM_NEW[GOM_NEW[NOTE] == "UP"])
GOM_UP = len(GOM_UP_NOTE)
GOM_DOWN_NOTE = pd.DataFrame(data=GOM_NEW[GOM_NEW[NOTE] == "DOWN"])
GOM_DOWN = len(GOM_DOWN_NOTE)
GOM_EQ_NOTE = pd.DataFrame(data=GOM_NEW[GOM_NEW[NOTE] == "EQ"])
GOM_EQ = len(GOM_EQ_NOTE)
GOM_LP_LEN = pd.DataFrame(data=df[GOOGLE_MOBILE_SERP])
GOM_LP = len(GOM_LP_LEN)
GOM_1_LEN = len(GOM_1)
for x in range(len(df)):
if LP_1 in str(GOM_LP_LEN[GOOGLE_MOBILE_SERP][x]):
LP_GOM.append(GOM_LP_LEN[GOOGLE_MOBILE_SERP][x])
else:
False
GOM_5_TOP = pd.DataFrame(data=TOP_5GOM)
GOM5_TOP = GOM_5_TOP.sum()[0]
GOM_FIRST_PAGE_TOP = pd.DataFrame(data=GOM_FIRST_PAGE)
GOMFIRST_TOP = GOM_FIRST_PAGE_TOP.sum()[0]
GOM_TWO_PAGE_TOP = pd.DataFrame(data=GOM_TWO_PAGE)
GOMTWO_TOP= GOM_TWO_PAGE_TOP.sum()[0]
BNG_DF = pd.DataFrame(data=BNG)
BNG_NEW = pd.DataFrame(data=BNG_DF[BNG_DF[BING_RANK] != "Not in top 30"])
BNG_NEW = pd.DataFrame(data=BNG_NEW[BNG_NEW[BING_RANK] != "Not in top 50"])
BNG_LEN = len(BNG_NEW)
BNG_1= pd.DataFrame(data=BNG_NEW[BNG_NEW[BING_RANK] == "1"])
BNG_NEW_NOTE = pd.DataFrame(data=BNG_NEW[BNG_NEW[NOTE] == "NEW"])
BNG_NOTE = len(BNG_NEW_NOTE)
BNG_UP_NOTE = pd.DataFrame(data=BNG_NEW[BNG_NEW[NOTE] == "UP"])
BNG_UP = len(BNG_UP_NOTE)
BNG_DOWN_NOTE = pd.DataFrame(data=BNG_NEW[BNG_NEW[NOTE] == "DOWN"])
BNG_DOWN = len(BNG_DOWN_NOTE)
BNG_EQ_NOTE = pd.DataFrame(data=BNG_NEW[BNG_NEW[NOTE] == "EQ"])
BNG_EQ = len(BNG_EQ_NOTE)
BNG_LP_LEN = pd.DataFrame(data=df[BNG_SERP])
BNG_LP = len(BNG_LP_LEN)
BNG_1_LEN = len(BNG_1)
TOP_5BNG = []
BNG_FIRST_PAGE = []
BNG_TWO_PAGE = []
LP_BNG_ = []
for x in range(len(df)):
if LP_1 in str(BNG_LP_LEN[BING_SERP][x]):
LP_BNG_.append(BNG_LP_LEN[BING_SERP][x])
else:
False
for x in range(5):
BNG_5 = len(pd.DataFrame(data=BNG_NEW[BNG_NEW[BING_RANK] == str(x+1)]))
TOP_5BNG.append(BNG_5)
for x in range(10):
BNG_FIRST = len(pd.DataFrame(data=BNG_NEW[BNG_NEW[BING_RANK] == str(x+1)]))
BNG_FIRST_PAGE.append(BNG_FIRST)
for x in range(20):
BNG_TWO = len(pd.DataFrame(data=BNG_NEW[BNG_NEW[BING_RANK] == str(x+1)]))
BNG_TWO_PAGE.append(BNG_TWO)
BNG_5_TOP = pd.DataFrame(data=TOP_5BNG)
BNG5_TOP = BNG_5_TOP.sum()[0]
BNG_FIRST_PAGE_TOP = pd.DataFrame(data=BNG_FIRST_PAGE)
BNGFIRST_TOP = BNG_FIRST_PAGE_TOP.sum()[0]
BNG_TWO_PAGE_TOP = pd.DataFrame(data=BNG_TWO_PAGE)
BNGTWO_TOP= BNG_TWO_PAGE_TOP.sum()[0]
LP_BNG_ = []
YAH_DF = pd.DataFrame(data=YAH)
YAH_NEW = pd.DataFrame(data=YAH_DF[YAH_DF [YAHOO_RANK] != "Not in top 30"])
YAH_NEW = pd.DataFrame(data=YAH_NEW[YAH_NEW[YAHOO_RANK] != "Not in top 50"])
YAH_LEN = len(YAH_NEW)
YAH_1= pd.DataFrame(data=YAH_NEW[YAH_NEW[YAHOO_RANK] == "1"])
YAH_NEW_NOTE = pd.DataFrame(data=YAH_NEW[YAH_NEW[NOTE] == "NEW"])
YAH_NOTE = len(YAH_NEW_NOTE)
YAH_UP_NOTE = pd.DataFrame(data=YAH_NEW[YAH_NEW[NOTE] == "UP"])
YAH_UP = len(YAH_UP_NOTE)
YAH_DOWN_NOTE = pd.DataFrame(data=YAH_NEW[YAH_NEW[NOTE] == "DOWN"])
YAH_DOWN = len(YAH_DOWN_NOTE)
YAH_EQ_NOTE = pd.DataFrame(data=YAH_NEW[YAH_NEW[NOTE] == "EQ"])
YAH_EQ = len(YAH_EQ_NOTE)
YAH_LP_LEN = pd.DataFrame(data=df[YAH_SERP])
YAH_LP = len(YAH_LP_LEN)
YAH_1_LEN = len(YAH_1)
TOP_5YAH = []
YAH_FIRST_PAGE = []
YAH_TWO_PAGE = []
LP_YAH_ = []
for x in range(len(df)):
if LP_1 in str(YAH_LP_LEN[YAHOO_SERP][x]):
LP_YAH_.append(YAH_LP_LEN[YAHOO_SERP][x])
else:
False
for x in range(5):
YAH_5 = len(pd.DataFrame(data=YAH_NEW[YAH_NEW[YAHOO_RANK] == str(x+1)]))
TOP_5YAH.append(YAH_5)
for x in range(10):
YAH_FIRST = len(pd.DataFrame(data=YAH_NEW[YAH_NEW[YAHOO_RANK] == str(x+1)]))
YAH_FIRST_PAGE.append(YAH_FIRST)
for x in range(20):
YAH_TWO = len(pd.DataFrame(data=YAH_NEW[YAH_NEW[YAHOO_RANK] == str(x+1)]))
YAH_TWO_PAGE.append(YAH_TWO)
YAH_5_TOP = pd.DataFrame(data=TOP_5YAH)
YAH5_TOP = YAH_5_TOP.sum()[0]
YAH_FIRST_PAGE_TOP = pd.DataFrame(data=YAH_FIRST_PAGE)
YAH_FIRST_TOP = YAH_FIRST_PAGE_TOP.sum()[0]
YAH_TWO_PAGE_TOP = pd.DataFrame(data=YAH_TWO_PAGE)
YAH_TWO_TOP= YAH_TWO_PAGE_TOP.sum()[0]
for x in range(len(df)):
if LP_1 in str(df[YAHOO_SERP][x]):
LP_YAH_.append(df[YAHOO_SERP][x])
else:
False
ARR = [GOM_1_LEN, GOD_1_LEN, BNG_1_LEN, YAH_1_LEN]
ARR_1 = pd.DataFrame(ARR)
TOP_1 = ARR_1.sum()[0]
ARR_TOP_5 = [GOM5_TOP, GOD5_TOP, BNG5_TOP, YAH5_TOP]
TOP_5 = pd.DataFrame(ARR_TOP_5)
TOP_5 = TOP_5.sum()[0]
ARR_FIRST_PAGE = [GODFIRST_TOP, GOMFIRST_TOP, BNGFIRST_TOP, YAH_FIRST_TOP]
FIRST_PAGE_DF = pd.DataFrame(ARR_FIRST_PAGE)
FIRST_PAGE = FIRST_PAGE_DF.sum()[0]
ARR_TWO_PAGE = [GODTWO_TOP, GOMTWO_TOP, BNGTWO_TOP, YAH_TWO_TOP]
FIRST_TWO_DF = pd.DataFrame(ARR_TWO_PAGE)
FIRST_TWO = FIRST_TWO_DF.sum()[0]
ARR_NEW_PAGE = [len(GOD_NEW_NOTE[GOD_NEW_NOTE['Ranking page(s)'].isnull() == False]), len(GOM_NEW_NOTE[GOM_NEW_NOTE['Ranking page(s)'].isnull() == False]), len(BNG_NEW_NOTE[BNG_NEW_NOTE['Ranking page(s)'].isnull() == False]), len(YAH_NEW_NOTE[YAH_NEW_NOTE['Ranking page(s)'].isnull() == False])]
FIRST_NEW_DF = pd.DataFrame(ARR_NEW_PAGE)
FIRST_NEW = FIRST_NEW_DF.sum()[0]
ARR_UP_PAGE = [GOD_UP, GOM_UP, BNG_UP, YAH_UP]
ARR_UP_DF = pd.DataFrame(ARR_UP_PAGE)
ARR_UP = ARR_UP_DF.sum()[0]
ARR_DOWN_PAGE = [GOD_DOWN, GOM_DOWN, BNG_DOWN, YAH_DOWN]
ARR_DOWN_DF = pd.DataFrame(ARR_DOWN_PAGE)
ARR_DOWN = ARR_DOWN_DF.sum()[0]
ARR_EQ_PAGE = [GOD_EQ, GOM_EQ, BNG_EQ, YAH_EQ]
ARR_EQ_DF = pd.DataFrame(ARR_EQ_PAGE)
ARR_EQ = ARR_EQ_DF.sum()[0]
ARR_GL = [ARR_UP, ARR_DOWN]
ARR_GL_DF = pd.DataFrame(ARR_GL)
ARR_GLDF = ARR_GL_DF.sum()[0]
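# Reporting period: first_day ends up as the last day of the previous month and last_day as its first day, so date_range covers the previous calendar month.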
now = datetime.datetime.now()
report_created = datetime.date(now.year, now.month, 1).strftime('%m/%d/%Y')
first_day = datetime.date(now.year, now.month, 1) - datetime.timedelta(days=1)
last_day = datetime.date(now.year, now.month, 1) - datetime.timedelta(days=first_day.day)
date_range = f"{last_day.strftime('%m/%d/%Y')} through {first_day.strftime('%m/%d/%Y')}"
head, sep, tail = df['Yahoo! URL Found'][0].replace('https://', "").partition(".com")
NEW_HEAD = head.replace('www.', "")
report_title = f"Actual SEO Media Report for {NEW_HEAD.upper()} Report for: {NEW_HEAD+sep} Report Date Range: {date_range} Report Section: Summary"
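# Build the summary report skeleton: `report` holds the row labels and DATA_REPORTS the matching values (blank strings act as spacers).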
report = [
report_title,
"",
"SUM A",
"General Report Statistics",
"Report Created",
"Keywords Analyzed",
"Ranking Check Depth",
"Engines Analyzed",
"Geographic Target",
"Baseline Report Date",
"Baseline Keyword Count",
"Services",
"",
"SUM B",
"Visibility Statistics",
"Listings in the First Position",
"Listings in the Top 5 Positions",
"Listings on the First Page",
"Listings on the First Two Pages",
"Listings New",
"Listings Which Moved Up",
"Listings Which Moved Down",
"Listings Which Did Not Change",
"Total Positions Gained/Lost",
"",
"GRAPH B",
"GRAPH C",
"GOD",
"GOM",
"BIN",
"YAH",
"",
"GRAPH D",
"GOD",
"GOM",
"BIN",
"YAH"
]
DATA_REPORTS = ["","","","",report_created,len(df), "25",'Google, Bing, Yahoo', 'Local', '7/15/2020', '47', 'SEO',"","","", TOP_1, TOP_5, FIRST_PAGE, FIRST_TWO, FIRST_NEW, ARR_UP, ARR_DOWN, ARR_EQ, ARR_GLDF,"",len(df),"",len(GOD_NEW), len(GOM_NEW),len(BNG_NEW), len(YAH_NEW),"","", len(LP_GOD_), len(LP_GOM), len(LP_BNG_), len(LP_YAH_)]
CLEAN_REPORT = pd.DataFrame(index=report, data=DATA_REPORTS)
# tempLink = js.document.createElement('a')
# tempLink2 = js.document.createElement('a')
# tempLink3 = js.document.createElement('a')
# tempLink4 = js.document.createElement('a')
# tempLink5 = js.document.createElement('a')
# blob = js.Blob.new([CLEAN_REPORT.to_csv(index = report , )], { type: 'text/csv' })
# blob2 = js.Blob.new([GOD_NEW.to_csv(index = None, )], { type: 'text/csv' })
# blob3 = js.Blob.new([GOM_NEW.to_csv(index = None, )], { type: 'text/csv' })
# blob4 = js.Blob.new([BNG_NEW.to_csv(index = None, )], { type: 'text/csv' })
# blob5 = js.Blob.new([YAH_NEW.to_csv(index = None, )], { type: 'text/csv' })
# url = js.window.URL.createObjectURL(blob)
# url2 = js.window.URL.createObjectURL(blob2)
# url3 = js.window.URL.createObjectURL(blob3)
# url4 = js.window.URL.createObjectURL(blob4)
# url5 = js.window.URL.createObjectURL(blob5)
# tempLink.href = url
# tempLink2.href = url2
# tempLink3.href = url3
# tempLink4.href = url4
# tempLink5.href = url5
# js.console.log(tempLink)
# js.console.log(tempLink2)
# js.console.log(tempLink3)
# js.console.log(tempLink4)
# js.console.log(tempLink5)
# tempLink.setAttribute('download', "ReportSummary.csv");
# tempLink2.setAttribute('download', 'ReportSERPPositionsGOD.csv');
# tempLink3.setAttribute('download', 'ReportSERPPositionsGOM.csv');
# tempLink4.setAttribute('download', 'ReportSERPPositionsBNG.csv');
# tempLink5.setAttribute('download', 'ReportSERPPositionsYAH.csv');
# tempLink.click();
# tempLink2.click();
# tempLink3.click();
# tempLink4.click();
# tempLink5.click();
links = []
blobs = []
filenames = [f"{NEW_HEAD.upper()}" + "_SUMMARY.csv", "ReportSERPPositionsGOD.csv", "ReportSERPPositionsGOM.csv", "ReportSERPPositionsBNG.csv", "ReportSERPPositionsYAH.csv"]
dataframes = [CLEAN_REPORT, GOD_NEW, GOM_NEW, BNG_NEW, YAH_NEW]
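# For each DataFrame, build a CSV Blob in the browser, attach it to a temporary <a> element and trigger a download.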
for i, df in enumerate(dataframes):
if i == 0:
blob = js.Blob.new([df.to_csv(index = report, sep='|')], {"type": "text/csv"})
url = js.window.URL.createObjectURL(blob)
link = js.document.createElement("a")
link.href = url
link.setAttribute("download", filenames[i])
links.append(link)
blobs.append(blob)
else:
blob = js.Blob.new([df.to_csv(index=None, header=False, sep='|')], {"type": "text/csv"})
url = js.window.URL.createObjectURL(blob)
link = js.document.createElement("a")
link.href = url
link.setAttribute("download", filenames[i])
links.append(link)
blobs.append(blob)
for i, link in enumerate(links):
js.console.log(link)
link.click()
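# FTP credentials are read from environment variables (they may be unset when running in the browser).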
ASM_FTP_UN = str(os.getenv('ASM_UN'))
ASM_FTP_PW = str(os.getenv('ASM_PW')) | ASMdatareportASM | /ASMdatareportASM-0.1-py3-none-any.whl/ASMdatareportASM-0.1.data/scripts/DATA_REPORT_CLEANER.py | DATA_REPORT_CLEANER.py |
import logging
import logs.config_client_log
import argparse
import sys
import os
from Crypto.PublicKey import RSA
from PyQt5.QtWidgets import QApplication, QMessageBox
from common.variables import *
from common.errors import ServerError
from common.decos import log
from client.database import ClientDatabase
from client.transport import ClientTransport
from client.main_window import ClientMainWindow
from client.start_dialog import UserNameDialog
# Initialise the client logger
logger = logging.getLogger('client')
# Command line argument parser
@log
def arg_parser():
'''
Command line argument parser; returns a tuple of 4 elements:
server address, port, user name and password.
Validates that the port number is acceptable.
'''
parser = argparse.ArgumentParser()
parser.add_argument('addr', default=DEFAULT_IP_ADDRESS, nargs='?')
parser.add_argument('port', default=DEFAULT_PORT, type=int, nargs='?')
parser.add_argument('-n', '--name', default=None, nargs='?')
parser.add_argument('-p', '--password', default='', nargs='?')
namespace = parser.parse_args(sys.argv[1:])
server_address = namespace.addr
server_port = namespace.port
client_name = namespace.name
client_passwd = namespace.password
# check that the port number is valid
if not 1023 < server_port < 65536:
logger.critical(
f'Попытка запуска клиента с неподходящим номером порта: {server_port}. Допустимы адреса с 1024 до 65535. Клиент завершается.')
exit(1)
return server_address, server_port, client_name, client_passwd
# Client main entry point
if __name__ == '__main__':
# Load the command line parameters
server_address, server_port, client_name, client_passwd = arg_parser()
# Create the client application
client_app = QApplication(sys.argv)
# If the user name was not specified on the command line, request it
start_dialog = UserNameDialog()
if not client_name or not client_passwd:
client_app.exec_()
# If the user entered a name and pressed OK, save the input and
# delete the dialog object, otherwise exit
if start_dialog.ok_pressed:
client_name = start_dialog.client_name.text()
client_passwd = start_dialog.client_passwd.text()
else:
exit(0)
# Write the log entry
logger.info(
f'Запущен клиент с парамертами: адрес сервера: {server_address} , порт: {server_port}, имя пользователя: {client_name}')
# Load the keys from file; if the file does not exist, generate a new key pair.
dir_path = os.path.dirname(os.path.realpath(__file__))
key_file = os.path.join(dir_path, f'{client_name}.key')
if not os.path.exists(key_file):
keys = RSA.generate(2048, os.urandom)
with open(key_file, 'wb') as key:
key.write(keys.export_key())
else:
with open(key_file, 'rb') as key:
keys = RSA.import_key(key.read())
keys.publickey().export_key()
# Create the database object
database = ClientDatabase(client_name)
# Create the transport object and start the transport thread
try:
transport = ClientTransport(
server_port,
server_address,
database,
client_name,
client_passwd,
keys)
except ServerError as error:
message = QMessageBox()
message.critical(start_dialog, 'Ошибка сервера', error.text)
exit(1)
transport.setDaemon(True)
transport.start()
# Delete the dialog object, it is no longer needed
del start_dialog
# Create the GUI
main_window = ClientMainWindow(database, transport, keys)
main_window.make_connection(transport)
main_window.setWindowTitle(f'Чат Программа alpha release - {client_name}')
client_app.exec_()
# Once the GUI has closed, shut down the transport
transport.transport_shutdown()
transport.join() | ASPER_Messenger_Client | /ASPER_Messenger_Client-0.11.1.tar.gz/ASPER_Messenger_Client-0.11.1/client/client.py | client.py |
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton
import logging
logger = logging.getLogger('client')
class AddContactDialog(QDialog):
'''
A dialog for adding a user to the contact list.
Offers the user a list of possible contacts and
adds the selected one to the contacts.
'''
def __init__(self, transport, database):
super().__init__()
self.transport = transport
self.database = database
self.setFixedSize(350, 120)
self.setWindowTitle('Выберите контакт для добавления:')
self.setAttribute(Qt.WA_DeleteOnClose)
self.setModal(True)
self.selector_label = QLabel('Выберите контакт для добавления:', self)
self.selector_label.setFixedSize(200, 20)
self.selector_label.move(10, 0)
self.selector = QComboBox(self)
self.selector.setFixedSize(200, 20)
self.selector.move(10, 30)
self.btn_refresh = QPushButton('Обновить список', self)
self.btn_refresh.setFixedSize(100, 30)
self.btn_refresh.move(60, 60)
self.btn_ok = QPushButton('Добавить', self)
self.btn_ok.setFixedSize(100, 30)
self.btn_ok.move(230, 20)
self.btn_cancel = QPushButton('Отмена', self)
self.btn_cancel.setFixedSize(100, 30)
self.btn_cancel.move(230, 60)
self.btn_cancel.clicked.connect(self.close)
# Fill the list of possible contacts
self.possible_contacts_update()
# Wire the refresh button to its action
self.btn_refresh.clicked.connect(self.update_possible_contacts)
def possible_contacts_update(self):
'''
Method that fills the list of possible contacts.
Builds a list of all registered users,
excluding those already in the contacts and the user themselves.
'''
self.selector.clear()
# sets of all known users and of the client's contacts
contacts_list = set(self.database.get_contacts())
users_list = set(self.database.get_users())
# Remove ourselves from the user list so that we cannot add
# ourselves as a contact
users_list.remove(self.transport.username)
# Add the list of possible contacts
self.selector.addItems(users_list - contacts_list)
def update_possible_contacts(self):
'''
Method that refreshes the list of possible contacts. Requests the
list of known users from the server and updates the window contents.
'''
try:
self.transport.user_list_update()
except OSError:
pass
else:
logger.debug('Обновление списка пользователей с сервера выполнено')
self.possible_contacts_update() | ASPER_Messenger_Client | /ASPER_Messenger_Client-0.11.1.tar.gz/ASPER_Messenger_Client-0.11.1/client/client/add_contact.py | add_contact.py |
import socket
import time
import logging
import json
import threading
import hashlib
import hmac
import binascii
from PyQt5.QtCore import pyqtSignal, QObject
from common.utils import *
from common.variables import *
from common.errors import ServerError
# Logger and the lock object for working with the socket.
logger = logging.getLogger('client')
socket_lock = threading.Lock()
class ClientTransport(threading.Thread, QObject):
'''
Class implementing the transport subsystem of the client
module. Responsible for interaction with the server.
'''
# Signals: new message and connection lost
new_message = pyqtSignal(dict)
message_205 = pyqtSignal()
connection_lost = pyqtSignal()
def __init__(self, port, ip_address, database, username, passwd, keys):
# Call the parent class constructors
threading.Thread.__init__(self)
QObject.__init__(self)
# Database class - database access
self.database = database
# User name
self.username = username
# Password
self.password = passwd
# Socket for communicating with the server
self.transport = None
# Key set used for encryption
self.keys = keys
# Establish the connection:
self.connection_init(port, ip_address)
# Refresh the tables of known users and contacts
try:
self.user_list_update()
self.contacts_list_update()
except OSError as err:
if err.errno:
logger.critical(f'Потеряно соединение с сервером.')
raise ServerError('Потеряно соединение с сервером!')
logger.error(
'Timeout соединения при обновлении списков пользователей.')
except json.JSONDecodeError:
logger.critical(f'Потеряно соединение с сервером.')
raise ServerError('Потеряно соединение с сервером!')
# Flag that keeps the transport loop running.
self.running = True
def connection_init(self, port, ip):
'''Method responsible for establishing the connection to the server.'''
# Initialise the socket and tell the server that we are online
self.transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# The timeout is needed to free up the socket.
self.transport.settimeout(5)
# Connect, making 5 attempts; set the success flag to True if
# it worked
connected = False
for i in range(5):
logger.info(f'Попытка подключения №{i + 1}')
try:
self.transport.connect((ip, port))
except (OSError, ConnectionRefusedError):
pass
else:
connected = True
break
time.sleep(1)
# If the connection could not be established - raise an exception
if not connected:
logger.critical('Не удалось установить соединение с сервером')
raise ServerError('Не удалось установить соединение с сервером')
logger.debug('Установлено соединение с сервером')
# Start the authorisation procedure
# Get the password hash
passwd_bytes = self.password.encode('utf-8')
salt = self.username.lower().encode('utf-8')
passwd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt, 10000)
passwd_hash_string = binascii.hexlify(passwd_hash)
# Get the public key and decode it from bytes
pubkey = self.keys.publickey().export_key().decode('ascii')
# Authorise on the server
with socket_lock:
presense = {
ACTION: PRESENCE,
TIME: time.time(),
USER: {
ACCOUNT_NAME: self.username,
PUBLIC_KEY: pubkey
}
}
# Send the greeting message to the server.
try:
send_message(self.transport, presense)
ans = get_message(self.transport)
# If the server returned an error, raise an exception.
if RESPONSE in ans:
if ans[RESPONSE] == 400:
raise ServerError(ans[ERROR])
elif ans[RESPONSE] == 511:
# If everything is fine, continue the authorisation
# procedure.
ans_data = ans[DATA]
hash = hmac.new(
passwd_hash_string, ans_data.encode('utf-8'))
digest = hash.digest()
my_ans = RESPONSE_511
my_ans[DATA] = binascii.b2a_base64(
digest).decode('ascii')
send_message(self.transport, my_ans)
self.process_server_ans(get_message(self.transport))
except (OSError, json.JSONDecodeError):
raise ServerError('Сбой соединения в процессе авторизации.')
def process_server_ans(self, message):
'''Handler method for incoming messages from the server.'''
logger.debug(f'Разбор сообщения от сервера: {message}')
# If this is an acknowledgement of some kind
if RESPONSE in message:
if message[RESPONSE] == 200:
return
elif message[RESPONSE] == 400:
raise ServerError(f'{message[ERROR]}')
elif message[RESPONSE] == 205:
self.user_list_update()
self.contacts_list_update()
self.message_205.emit()
else:
logger.error(
f'Принят неизвестный код подтверждения {message[RESPONSE]}')
# If this is a message from a user, add it to the database and emit
# the new-message signal
elif ACTION in message and message[ACTION] == MESSAGE and SENDER in message and DESTINATION in message \
and MESSAGE_TEXT in message and message[DESTINATION] == self.username:
logger.debug(
f'Получено сообщение от пользователя {message[SENDER]}:{message[MESSAGE_TEXT]}')
self.new_message.emit(message)
def contacts_list_update(self):
'''Method that refreshes the contact list from the server.'''
self.database.contacts_clear()
logger.debug(f'Запрос контакт листа для пользователся {self.name}')
req = {
ACTION: GET_CONTACTS,
TIME: time.time(),
USER: self.username
}
logger.debug(f'Сформирован запрос {req}')
with socket_lock:
send_message(self.transport, req)
ans = get_message(self.transport)
logger.debug(f'Получен ответ {ans}')
if RESPONSE in ans and ans[RESPONSE] == 202:
for contact in ans[LIST_INFO]:
self.database.add_contact(contact)
else:
logger.error('Не удалось обновить список контактов.')
def user_list_update(self):
'''Method that refreshes the list of known users from the server.'''
logger.debug(f'Запрос списка известных пользователей {self.username}')
req = {
ACTION: USERS_REQUEST,
TIME: time.time(),
ACCOUNT_NAME: self.username
}
with socket_lock:
send_message(self.transport, req)
ans = get_message(self.transport)
if RESPONSE in ans and ans[RESPONSE] == 202:
self.database.add_users(ans[LIST_INFO])
else:
logger.error('Не удалось обновить список известных пользователей.')
def key_request(self, user):
'''Method that requests a user's public key from the server.'''
logger.debug(f'Запрос публичного ключа для {user}')
req = {
ACTION: PUBLIC_KEY_REQUEST,
TIME: time.time(),
ACCOUNT_NAME: user
}
with socket_lock:
send_message(self.transport, req)
ans = get_message(self.transport)
if RESPONSE in ans and ans[RESPONSE] == 511:
return ans[DATA]
else:
logger.error(f'Не удалось получить ключ собеседника{user}.')
def add_contact(self, contact):
'''Method that sends information about adding a contact to the server.'''
logger.debug(f'Создание контакта {contact}')
req = {
ACTION: ADD_CONTACT,
TIME: time.time(),
USER: self.username,
ACCOUNT_NAME: contact
}
with socket_lock:
send_message(self.transport, req)
self.process_server_ans(get_message(self.transport))
def remove_contact(self, contact):
'''Method that sends information about removing a contact to the server.'''
logger.debug(f'Удаление контакта {contact}')
req = {
ACTION: REMOVE_CONTACT,
TIME: time.time(),
USER: self.username,
ACCOUNT_NAME: contact
}
with socket_lock:
send_message(self.transport, req)
self.process_server_ans(get_message(self.transport))
def transport_shutdown(self):
'''Method that notifies the server that the client is shutting down.'''
self.running = False
message = {
ACTION: EXIT,
TIME: time.time(),
ACCOUNT_NAME: self.username
}
with socket_lock:
try:
send_message(self.transport, message)
except OSError:
pass
logger.debug('Транспорт завершает работу.')
time.sleep(0.5)
def send_message(self, to, message):
'''Method that sends a message for a user to the server.'''
message_dict = {
ACTION: MESSAGE,
SENDER: self.username,
DESTINATION: to,
TIME: time.time(),
MESSAGE_TEXT: message
}
logger.debug(f'Сформирован словарь сообщения: {message_dict}')
# We must wait for the socket to be released before sending the message
with socket_lock:
send_message(self.transport, message_dict)
self.process_server_ans(get_message(self.transport))
logger.info(f'Отправлено сообщение для пользователя {to}')
def run(self):
'''Method containing the main loop of the transport thread.'''
logger.debug('Запущен процесс - приёмник собщений с сервера.')
while self.running:
# Sleep for a second, then try to grab the socket again.
# Without this delay, sending might have to wait quite a while
# for the socket to be released.
time.sleep(1)
message = None
with socket_lock:
try:
self.transport.settimeout(0.5)
message = get_message(self.transport)
except OSError as err:
if err.errno:
logger.critical(f'Потеряно соединение с сервером.')
self.running = False
self.connection_lost.emit()
# Connection problems
except (ConnectionError, ConnectionAbortedError, ConnectionResetError, json.JSONDecodeError, TypeError):
logger.debug(f'Потеряно соединение с сервером.')
self.running = False
self.connection_lost.emit()
finally:
self.transport.settimeout(5)
# If a message was received, call the handler function:
if message:
logger.debug(f'Принято сообщение с сервера: {message}')
self.process_server_ans(message) | ASPER_Messenger_Client | /ASPER_Messenger_Client-0.11.1.tar.gz/ASPER_Messenger_Client-0.11.1/client/client/transport.py | transport.py |
from PyQt5.QtWidgets import QMainWindow, qApp, QMessageBox, QApplication, QListView
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QBrush, QColor
from PyQt5.QtCore import pyqtSlot, QEvent, Qt
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
import json
import logging
import base64
from client.main_window_conv import Ui_MainClientWindow
from client.add_contact import AddContactDialog
from client.del_contact import DelContactDialog
from common.errors import ServerError
from common.variables import *
logger = logging.getLogger('client')
class ClientMainWindow(QMainWindow):
'''
Class - the application's main user window.
Contains all the core logic of the client module.
The window layout was created in QTDesigner and is loaded from
the converted file main_window_conv.py
'''
def __init__(self, database, transport, keys):
super().__init__()
# core attributes
self.database = database
self.transport = transport
# message decrypter object with the key preloaded
self.decrypter = PKCS1_OAEP.new(keys)
# Load the window layout created in the designer
self.ui = Ui_MainClientWindow()
self.ui.setupUi(self)
# "Exit" button
self.ui.menu_exit.triggered.connect(qApp.exit)
# Send message button
self.ui.btn_send.clicked.connect(self.send_message)
# "add contact" button
self.ui.btn_add_contact.clicked.connect(self.add_contact_window)
self.ui.menu_add_contact.triggered.connect(self.add_contact_window)
# Remove contact
self.ui.btn_remove_contact.clicked.connect(self.delete_contact_window)
self.ui.menu_del_contact.triggered.connect(self.delete_contact_window)
# Additional attributes that are required
self.contacts_model = None
self.history_model = None
self.messages = QMessageBox()
self.current_chat = None
self.current_chat_key = None
self.encryptor = None
self.ui.list_messages.setHorizontalScrollBarPolicy(
Qt.ScrollBarAlwaysOff)
self.ui.list_messages.setWordWrap(True)
# A double click on the contact list is sent to the handler
self.ui.list_contacts.doubleClicked.connect(self.select_active_user)
self.clients_list_update()
self.set_disabled_input()
self.show()
def set_disabled_input(self):
''' Method that disables the input fields'''
# Recipient label.
self.ui.label_new_message.setText(
'Для выбора получателя дважды кликните на нем в окне контактов.')
self.ui.text_message.clear()
if self.history_model:
self.history_model.clear()
# The input field and send button stay disabled until a recipient is selected.
self.ui.btn_clear.setDisabled(True)
self.ui.btn_send.setDisabled(True)
self.ui.text_message.setDisabled(True)
self.encryptor = None
self.current_chat = None
self.current_chat_key = None
def history_list_update(self):
'''
Method that fills the corresponding QListView
with the message history for the current conversation partner.
'''
# Get the history sorted by date
list = sorted(
self.database.get_history(
self.current_chat),
key=lambda item: item[3])
# If the model has not been created yet, create it.
if not self.history_model:
self.history_model = QStandardItemModel()
self.ui.list_messages.setModel(self.history_model)
# Clear out the old entries
self.history_model.clear()
# Take no more than the 20 most recent entries.
length = len(list)
start_index = 0
if length > 20:
start_index = length - 20
# Fill the model with entries; incoming and outgoing messages
# are distinguished by alignment and a different background.
# Only the last 20 messages are displayed
for i in range(start_index, length):
item = list[i]
if item[1] == 'in':
mess = QStandardItem(
f'Входящее от {item[3].replace(microsecond=0)}:\n {item[2]}')
mess.setEditable(False)
mess.setBackground(QBrush(QColor(255, 213, 213)))
mess.setTextAlignment(Qt.AlignLeft)
self.history_model.appendRow(mess)
else:
mess = QStandardItem(
f'Исходящее от {item[3].replace(microsecond=0)}:\n {item[2]}')
mess.setEditable(False)
mess.setTextAlignment(Qt.AlignRight)
mess.setBackground(QBrush(QColor(204, 255, 204)))
self.history_model.appendRow(mess)
self.ui.list_messages.scrollToBottom()
def select_active_user(self):
'''Handler method for the double-click event on the contact list.'''
# The user chosen (by double click) is the selected item in the
# QListView
self.current_chat = self.ui.list_contacts.currentIndex().data()
# call the main function
self.set_active_user()
def set_active_user(self):
'''Method that activates a chat with the selected partner.'''
# Request the user's public key and create the encryption object
try:
self.current_chat_key = self.transport.key_request(
self.current_chat)
logger.debug(f'Загружен открытый ключ для {self.current_chat}')
if self.current_chat_key:
self.encryptor = PKCS1_OAEP.new(
RSA.import_key(self.current_chat_key))
except (OSError, json.JSONDecodeError):
self.current_chat_key = None
self.encryptor = None
logger.debug(f'Не удалось получить ключ для {self.current_chat}')
# If there is no key, report that a chat with this user could not be started
if not self.current_chat_key:
self.messages.warning(
self, 'Ошибка', 'Для выбранного пользователя нет ключа шифрования.')
return
# Set the label and enable the buttons
self.ui.label_new_message.setText(
f'Введите сообщенние для {self.current_chat}:')
self.ui.btn_clear.setDisabled(False)
self.ui.btn_send.setDisabled(False)
self.ui.text_message.setDisabled(False)
# Fill the window with the message history for the selected user.
self.history_list_update()
def clients_list_update(self):
'''Method that refreshes the contact list.'''
contacts_list = self.database.get_contacts()
self.contacts_model = QStandardItemModel()
for i in sorted(contacts_list):
item = QStandardItem(i)
item.setEditable(False)
self.contacts_model.appendRow(item)
self.ui.list_contacts.setModel(self.contacts_model)
def add_contact_window(self):
'''Method that creates the add-contact dialog window'''
global select_dialog
select_dialog = AddContactDialog(self.transport, self.database)
select_dialog.btn_ok.clicked.connect(
lambda: self.add_contact_action(select_dialog))
select_dialog.show()
def add_contact_action(self, item):
'''Handler method for the "Add" button click'''
new_contact = item.selector.currentText()
self.add_contact(new_contact)
item.close()
def add_contact(self, new_contact):
'''
Method that adds a contact to the server and client databases.
After updating the databases it also refreshes the window contents.
'''
try:
self.transport.add_contact(new_contact)
except ServerError as err:
self.messages.critical(self, 'Ошибка сервера', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
else:
self.database.add_contact(new_contact)
new_contact = QStandardItem(new_contact)
new_contact.setEditable(False)
self.contacts_model.appendRow(new_contact)
logger.info(f'Успешно добавлен контакт {new_contact}')
self.messages.information(
self, 'Успех', 'Контакт успешно добавлен.')
def delete_contact_window(self):
'''Method that creates the contact removal dialog window.'''
global remove_dialog
remove_dialog = DelContactDialog(self.database)
remove_dialog.btn_ok.clicked.connect(
lambda: self.delete_contact(remove_dialog))
remove_dialog.show()
def delete_contact(self, item):
'''
Method that removes a contact from the server and client databases.
After updating the databases it also refreshes the window contents.
'''
selected = item.selector.currentText()
try:
self.transport.remove_contact(selected)
except ServerError as err:
self.messages.critical(self, 'Ошибка сервера', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
else:
self.database.del_contact(selected)
self.clients_list_update()
logger.info(f'Успешно удалён контакт {selected}')
self.messages.information(self, 'Успех', 'Контакт успешно удалён.')
item.close()
# If the active user was removed, disable the input fields.
if selected == self.current_chat:
self.current_chat = None
self.set_disabled_input()
def send_message(self):
'''
Function that sends a message to the current conversation partner.
Handles encrypting the message and sending it.
'''
# Get the text from the field, check that it is not empty, then take the message
# and clear the field
message_text = self.ui.text_message.toPlainText()
self.ui.text_message.clear()
if not message_text:
return
# Encrypt the message with the recipient's key and wrap it in base64.
message_text_encrypted = self.encryptor.encrypt(
message_text.encode('utf8'))
message_text_encrypted_base64 = base64.b64encode(
message_text_encrypted)
try:
self.transport.send_message(
self.current_chat,
message_text_encrypted_base64.decode('ascii'))
pass
except ServerError as err:
self.messages.critical(self, 'Ошибка', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
except (ConnectionResetError, ConnectionAbortedError):
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
else:
self.database.save_message(self.current_chat, 'out', message_text)
logger.debug(
f'Отправлено сообщение для {self.current_chat}: {message_text}')
self.history_list_update()
@pyqtSlot(dict)
def message(self, message):
'''
Slot handler for incoming messages; decrypts
incoming messages and saves them in the message history.
Prompts the user if a message arrives from someone other than the current
partner. Switches the conversation partner when needed.
'''
# Get the byte string
encrypted_message = base64.b64decode(message[MESSAGE_TEXT])
# Decrypt the string; on error show a message and stop the function
try:
decrypted_message = self.decrypter.decrypt(encrypted_message)
except (ValueError, TypeError):
self.messages.warning(
self, 'Ошибка', 'Не удалось декодировать сообщение.')
return
# Save the message to the database and refresh the message history, or
# open a new chat.
self.database.save_message(
self.current_chat,
'in',
decrypted_message.decode('utf8'))
sender = message[SENDER]
if sender == self.current_chat:
self.history_list_update()
else:
# Check whether this user is in our contacts:
if self.database.check_contact(sender):
# If they are, ask whether to open a chat with them and open it
# if desired
if self.messages.question(
self,
'Новое сообщение',
f'Получено новое сообщение от {sender}, открыть чат с ним?',
QMessageBox.Yes,
QMessageBox.No) == QMessageBox.Yes:
self.current_chat = sender
self.set_active_user()
else:
print('NO')
# Since they are not, ask whether to add the user to the contacts.
if self.messages.question(
self,
'Новое сообщение',
f'Получено новое сообщение от {sender}.\n Данного пользователя нет в вашем контакт-листе.\n Добавить в контакты и открыть чат с ним?',
QMessageBox.Yes,
QMessageBox.No) == QMessageBox.Yes:
self.add_contact(sender)
self.current_chat = sender
# The message has to be saved again, otherwise it would be lost,
# because the contact did not exist at the time of the previous call.
self.database.save_message(
self.current_chat, 'in', decrypted_message.decode('utf8'))
self.set_active_user()
@pyqtSlot()
def connection_lost(self):
'''
Slot handler for loss of connection to the server.
Shows a warning window and terminates the application.
'''
self.messages.warning(
self,
'Сбой соединения',
'Потеряно соединение с сервером. ')
self.close()
@pyqtSlot()
def sig_205(self):
'''
Slot that updates the databases on command from the server.
'''
if self.current_chat and not self.database.check_user(
self.current_chat):
self.messages.warning(
self,
'Сочувствую',
'К сожалению собеседник был удалён с сервера.')
self.set_disabled_input()
self.current_chat = None
self.clients_list_update()
def make_connection(self, trans_obj):
'''Method that connects the signals and slots.'''
trans_obj.new_message.connect(self.message)
trans_obj.connection_lost.connect(self.connection_lost)
trans_obj.message_205.connect(self.sig_205) | ASPER_Messenger_Client | /ASPER_Messenger_Client-0.11.1.tar.gz/ASPER_Messenger_Client-0.11.1/client/client/main_window.py | main_window.py |
import datetime
from common.variables import *
from sqlalchemy import create_engine, Table, Column, Integer, String, Text, MetaData, DateTime
from sqlalchemy.sql import default_comparator
from sqlalchemy.orm import mapper, sessionmaker
import os
class ClientDatabase:
'''
Wrapper class for working with the client database.
Uses an SQLite database, implemented with
SQLAlchemy ORM and the classical mapping approach.
'''
class KnownUsers:
'''
Class - mapping for the table of all known users.
'''
def __init__(self, user):
self.id = None
self.username = user
class MessageStat:
'''
Класс - отображение для таблицы статистики переданных сообщений.
'''
def __init__(self, contact, direction, message):
self.id = None
self.contact = contact
self.direction = direction
self.message = message
self.date = datetime.datetime.now()
class Contacts:
'''
Класс - отображение для таблицы контактов.
'''
def __init__(self, contact):
self.id = None
self.name = contact
# Конструктор класса:
def __init__(self, name):
# Создаём движок базы данных, поскольку разрешено несколько
# клиентов одновременно, каждый должен иметь свою БД
# Поскольку клиент мультипоточный необходимо отключить
# проверки на подключения с разных потоков,
# иначе sqlite3.ProgrammingError
path = os.path.dirname(os.path.realpath(__file__))
filename = f'client_{name}.db3'
self.database_engine = create_engine(
f'sqlite:///{os.path.join(path, filename)}',
echo=False,
pool_recycle=7200,
connect_args={
'check_same_thread': False})
# Создаём объект MetaData
self.metadata = MetaData()
# Создаём таблицу известных пользователей
users = Table('known_users', self.metadata,
Column('id', Integer, primary_key=True),
Column('username', String)
)
# Создаём таблицу истории сообщений
history = Table('message_history', self.metadata,
Column('id', Integer, primary_key=True),
Column('contact', String),
Column('direction', String),
Column('message', Text),
Column('date', DateTime)
)
# Создаём таблицу контактов
contacts = Table('contacts', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', String, unique=True)
)
# Создаём таблицы
self.metadata.create_all(self.database_engine)
# Создаём отображения
mapper(self.KnownUsers, users)
mapper(self.MessageStat, history)
mapper(self.Contacts, contacts)
# Создаём сессию
Session = sessionmaker(bind=self.database_engine)
self.session = Session()
# Необходимо очистить таблицу контактов, т.к. при запуске они
# подгружаются с сервера.
self.session.query(self.Contacts).delete()
self.session.commit()
def add_contact(self, contact):
'''Метод добавляющий контакт в базу данных.'''
if not self.session.query(
self.Contacts).filter_by(
name=contact).count():
contact_row = self.Contacts(contact)
self.session.add(contact_row)
self.session.commit()
def contacts_clear(self):
'''Метод очищающий таблицу со списком контактов.'''
self.session.query(self.Contacts).delete()
def del_contact(self, contact):
'''Метод удаляющий определённый контакт.'''
self.session.query(self.Contacts).filter_by(name=contact).delete()
def add_users(self, users_list):
'''Метод заполняющий таблицу известных пользователей.'''
self.session.query(self.KnownUsers).delete()
for user in users_list:
user_row = self.KnownUsers(user)
self.session.add(user_row)
self.session.commit()
def save_message(self, contact, direction, message):
'''Метод сохраняющий сообщение в базе данных.'''
message_row = self.MessageStat(contact, direction, message)
self.session.add(message_row)
self.session.commit()
def get_contacts(self):
'''Метод возвращающий список всех контактов.'''
return [contact[0]
for contact in self.session.query(self.Contacts.name).all()]
def get_users(self):
'''Метод возвращающий список всех известных пользователей.'''
return [user[0]
for user in self.session.query(self.KnownUsers.username).all()]
def check_user(self, user):
'''Метод проверяющий существует ли пользователь.'''
if self.session.query(
self.KnownUsers).filter_by(
username=user).count():
return True
else:
return False
def check_contact(self, contact):
'''Метод проверяющий существует ли контакт.'''
if self.session.query(self.Contacts).filter_by(name=contact).count():
return True
else:
return False
def get_history(self, contact):
'''Метод возвращающий историю сообщений с определённым пользователем.'''
query = self.session.query(
self.MessageStat).filter_by(
contact=contact)
return [(history_row.contact,
history_row.direction,
history_row.message,
history_row.date) for history_row in query.all()]
# отладка
if __name__ == '__main__':
test_db = ClientDatabase('test1')
# for i in ['test3', 'test4', 'test5']:
# test_db.add_contact(i)
# test_db.add_contact('test4')
# test_db.add_users(['test1', 'test2', 'test3', 'test4', 'test5'])
# test_db.save_message('test2', 'in', f'Привет! я тестовое сообщение от {datetime.datetime.now()}!')
# test_db.save_message('test2', 'out', f'Привет! я другое тестовое сообщение от {datetime.datetime.now()}!')
# print(test_db.get_contacts())
# print(test_db.get_users())
# print(test_db.check_user('test1'))
# print(test_db.check_user('test10'))
print(sorted(test_db.get_history('test2'), key=lambda item: item[3]))
# test_db.del_contact('test4')
# print(test_db.get_contacts()) | ASPER_Messenger_Client | /ASPER_Messenger_Client-0.11.1.tar.gz/ASPER_Messenger_Client-0.11.1/client/client/database.py | database.py |
import socket
import logging
import logs.config_client_log
import logs.config_server_log
import sys
sys.path.append('../')
# метод определения модуля, источника запуска.
if sys.argv[0].find('client') == -1:
# если не клиент то сервер!
logger = logging.getLogger('server')
else:
# иначе сервер
logger = logging.getLogger('client')
def log(func_to_log):
'''
Декоратор, выполняющий логирование вызовов функций.
Сохраняет события типа debug, содержащие
информацию о имени вызываемой функиции, параметры с которыми
вызывается функция, и модуль, вызывающий функцию.
'''
def log_saver(*args, **kwargs):
logger.debug(
f'Была вызвана функция {func_to_log.__name__} c параметрами {args} , {kwargs}. Вызов из модуля {func_to_log.__module__}')
ret = func_to_log(*args, **kwargs)
return ret
return log_saver
def login_required(func):
'''
Декоратор, проверяющий, что клиент авторизован на сервере.
Проверяет, что передаваемый объект сокета находится в
списке авторизованных клиентов.
За исключением передачи словаря-запроса
на авторизацию. Если клиент не авторизован,
генерирует исключение TypeError
'''
def checker(*args, **kwargs):
# проверяем, что первый аргумент - экземпляр MessageProcessor
# Импортить необходимо тут, иначе ошибка рекурсивного импорта.
from server.core import MessageProcessor
from common.variables import ACTION, PRESENCE
if isinstance(args[0], MessageProcessor):
found = False
for arg in args:
if isinstance(arg, socket.socket):
# Проверяем, что данный сокет есть в списке names класса
# MessageProcessor
for client in args[0].names:
if args[0].names[client] == arg:
found = True
# Теперь надо проверить, что передаваемые аргументы не presence
# сообщение. Если presense, то разрешаем
for arg in args:
if isinstance(arg, dict):
if ACTION in arg and arg[ACTION] == PRESENCE:
found = True
# Если не не авторизован и не сообщение начала авторизации, то
# вызываем исключение.
if not found:
raise TypeError
return func(*args, **kwargs)
return checker | ASPER_Messenger_Client | /ASPER_Messenger_Client-0.11.1.tar.gz/ASPER_Messenger_Client-0.11.1/client/common/decos.py | decos.py |
import dis
class ServerMaker(type):
'''
Метакласс, проверяющий что в результирующем классе нет клиентских
вызовов таких как: connect. Также проверяется, что серверный
сокет является TCP и работает по IPv4 протоколу.
'''
def __init__(cls, clsname, bases, clsdict):
# Список методов, которые используются в функциях класса:
methods = []
# Атрибуты, вызываемые функциями классов
attrs = []
for func in clsdict:
# Пробуем
try:
ret = dis.get_instructions(clsdict[func])
# Если не функция то ловим исключение
except TypeError:
pass
else:
# Раз функция разбираем код, получая используемые методы и
# атрибуты.
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
elif i.opname == 'LOAD_ATTR':
if i.argval not in attrs:
attrs.append(i.argval)
# Если обнаружено использование недопустимого метода connect,
# генерируем исключение:
if 'connect' in methods:
raise TypeError(
'Использование метода connect недопустимо в серверном классе')
# Если сокет не инициализировался константами SOCK_STREAM(TCP)
# AF_INET(IPv4), тоже исключение.
if not ('SOCK_STREAM' in attrs and 'AF_INET' in attrs):
raise TypeError('Некорректная инициализация сокета.')
super().__init__(clsname, bases, clsdict)
class ClientMaker(type):
'''
Метакласс, проверяющий что в результирующем классе нет серверных
вызовов таких как: accept, listen. Также проверяется, что сокет не
создаётся внутри конструктора класса.
'''
def __init__(cls, clsname, bases, clsdict):
# Список методов, которые используются в функциях класса:
methods = []
for func in clsdict:
# Пробуем
try:
ret = dis.get_instructions(clsdict[func])
# Если не функция то ловим исключение
except TypeError:
pass
else:
# Раз функция разбираем код, получая используемые методы.
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
# Если обнаружено использование недопустимого метода accept, listen,
# socket бросаем исключение:
for command in ('accept', 'listen', 'socket'):
if command in methods:
raise TypeError(
'В классе обнаружено использование запрещённого метода')
# Вызов get_message или send_message из utils считаем корректным
# использованием сокетов
if 'get_message' in methods or 'send_message' in methods:
pass
else:
raise TypeError(
'Отсутствуют вызовы функций, работающих с сокетами.')
super().__init__(clsname, bases, clsdict) | ASPER_Messenger_Client | /ASPER_Messenger_Client-0.11.1.tar.gz/ASPER_Messenger_Client-0.11.1/client/common/metaclasses.py | metaclasses.py |
import sys
import os
import argparse
import logging
import configparser
import logs.config_server_log
from common.utils import *
from common.decos import log
from server.core import MessageProcessor
from server.database import ServerStorage
from server.main_window import MainWindow
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt
# Инициализация логирования сервера.
logger = logging.getLogger('server')
@log
def arg_parser(default_port, default_address):
'''Парсер аргументов коммандной строки.'''
logger.debug(
f'Инициализация парсера аргументов коммандной строки: {sys.argv}')
parser = argparse.ArgumentParser()
parser.add_argument('-p', default=default_port, type=int, nargs='?')
parser.add_argument('-a', default=default_address, nargs='?')
parser.add_argument('--no_gui', action='store_true')
namespace = parser.parse_args(sys.argv[1:])
listen_address = namespace.a
listen_port = namespace.p
gui_flag = namespace.no_gui
logger.debug('Аргументы успешно загружены.')
return listen_address, listen_port, gui_flag
@log
def config_load():
'''Парсер конфигурационного ini файла.'''
config = configparser.ConfigParser()
dir_path = os.path.dirname(os.path.realpath(__file__))
config.read(f"{dir_path}/{'server.ini'}")
# Если конфиг файл загружен правильно, запускаемся, иначе конфиг по
# умолчанию.
if 'SETTINGS' in config:
return config
else:
config.add_section('SETTINGS')
config.set('SETTINGS', 'Default_port', str(DEFAULT_PORT))
config.set('SETTINGS', 'Listen_Address', '')
config.set('SETTINGS', 'Database_path', '')
config.set('SETTINGS', 'Database_file', 'server_database.db3')
return config
@log
def main():
'''Основная функция'''
# Загрузка файла конфигурации сервера
config = config_load()
# Загрузка параметров командной строки, если нет параметров, то задаём
# значения по умоланию.
listen_address, listen_port, gui_flag = arg_parser(
config['SETTINGS']['Default_port'], config['SETTINGS']['Listen_Address'])
# Инициализация базы данных
database = ServerStorage(
os.path.join(
config['SETTINGS']['Database_path'],
config['SETTINGS']['Database_file']))
# Создание экземпляра класса - сервера и его запуск:
server = MessageProcessor(listen_address, listen_port, database)
server.daemon = True
server.start()
# Если указан параметр без GUI то запускаем простенький обработчик
# консольного ввода
if gui_flag:
while True:
command = input('Введите exit для завершения работы сервера.')
if command == 'exit':
# Если выход, то завршаем основной цикл сервера.
server.running = False
server.join()
break
# Если не указан запуск без GUI, то запускаем GUI:
else:
# Создаём графическое окуружение для сервера:
server_app = QApplication(sys.argv)
server_app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
main_window = MainWindow(database, server, config)
# Запускаем GUI
server_app.exec_()
# По закрытию окон останавливаем обработчик сообщений
server.running = False
if __name__ == '__main__':
main() | ASPER_Messenger_Server | /ASPER_Messenger_Server-0.11.1.tar.gz/ASPER_Messenger_Server-0.11.1/server/server.py | server.py |
from PyQt5.QtWidgets import QDialog, QLabel, QLineEdit, QPushButton, QFileDialog, QMessageBox
from PyQt5.QtCore import Qt
import os
class ConfigWindow(QDialog):
'''Класс окно настроек.'''
def __init__(self, config):
super().__init__()
self.config = config
self.initUI()
def initUI(self):
'''Настройки окна'''
self.setFixedSize(365, 260)
self.setWindowTitle('Настройки сервера')
self.setAttribute(Qt.WA_DeleteOnClose)
self.setModal(True)
# Надпись о файле базы данных:
self.db_path_label = QLabel('Путь до файла базы данных: ', self)
self.db_path_label.move(10, 10)
self.db_path_label.setFixedSize(240, 15)
# Строка с путём базы
self.db_path = QLineEdit(self)
self.db_path.setFixedSize(250, 20)
self.db_path.move(10, 30)
self.db_path.setReadOnly(True)
# Кнопка выбора пути.
self.db_path_select = QPushButton('Обзор...', self)
self.db_path_select.move(275, 28)
# Метка с именем поля файла базы данных
self.db_file_label = QLabel('Имя файла базы данных: ', self)
self.db_file_label.move(10, 68)
self.db_file_label.setFixedSize(180, 15)
# Поле для ввода имени файла
self.db_file = QLineEdit(self)
self.db_file.move(200, 66)
self.db_file.setFixedSize(150, 20)
# Метка с номером порта
self.port_label = QLabel('Номер порта для соединений:', self)
self.port_label.move(10, 108)
self.port_label.setFixedSize(180, 15)
# Поле для ввода номера порта
self.port = QLineEdit(self)
self.port.move(200, 108)
self.port.setFixedSize(150, 20)
# Метка с адресом для соединений
self.ip_label = QLabel('С какого IP принимаем соединения:', self)
self.ip_label.move(10, 148)
self.ip_label.setFixedSize(180, 15)
# Метка с напоминанием о пустом поле.
self.ip_label_note = QLabel(
' оставьте это поле пустым, чтобы\n принимать соединения с любых адресов.',
self)
self.ip_label_note.move(10, 168)
self.ip_label_note.setFixedSize(500, 30)
# Поле для ввода ip
self.ip = QLineEdit(self)
self.ip.move(200, 148)
self.ip.setFixedSize(150, 20)
# Кнопка сохранения настроек
self.save_btn = QPushButton('Сохранить', self)
self.save_btn.move(190, 220)
# Кнапка закрытия окна
self.close_button = QPushButton('Закрыть', self)
self.close_button.move(275, 220)
self.close_button.clicked.connect(self.close)
self.db_path_select.clicked.connect(self.open_file_dialog)
self.show()
self.db_path.insert(self.config['SETTINGS']['Database_path'])
self.db_file.insert(self.config['SETTINGS']['Database_file'])
self.port.insert(self.config['SETTINGS']['Default_port'])
self.ip.insert(self.config['SETTINGS']['Listen_Address'])
self.save_btn.clicked.connect(self.save_server_config)
def open_file_dialog(self):
'''Метод обработчик открытия окна выбора папки.'''
global dialog
dialog = QFileDialog(self)
path = dialog.getExistingDirectory()
path = path.replace('/', '\\')
self.db_path.clear()
self.db_path.insert(path)
def save_server_config(self):
'''
Метод сохранения настроек.
Проверяет правильность введённых данных и
если всё правильно сохраняет ini файл.
'''
global config_window
message = QMessageBox()
self.config['SETTINGS']['Database_path'] = self.db_path.text()
self.config['SETTINGS']['Database_file'] = self.db_file.text()
try:
port = int(self.port.text())
except ValueError:
message.warning(self, 'Ошибка', 'Порт должен быть числом')
else:
self.config['SETTINGS']['Listen_Address'] = self.ip.text()
if 1023 < port < 65536:
self.config['SETTINGS']['Default_port'] = str(port)
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, '..')
with open(f"{dir_path}/{'server.ini'}", 'w') as conf:
self.config.write(conf)
message.information(
self, 'OK', 'Настройки успешно сохранены!')
else:
message.warning(
self, 'Ошибка', 'Порт должен быть от 1024 до 65536') | ASPER_Messenger_Server | /ASPER_Messenger_Server-0.11.1.tar.gz/ASPER_Messenger_Server-0.11.1/server/server/config_window.py | config_window.py |
import threading
import logging
import select
import socket
import json
import hmac
import binascii
import os
from common.metaclasses import ServerMaker
from common.descryptors import Port
from common.variables import *
from common.utils import send_message, get_message
from common.decos import login_required
# Загрузка логера
logger = logging.getLogger('server')
class MessageProcessor(threading.Thread):
'''
Основной класс сервера. Принимает содинения, словари - пакеты
от клиентов, обрабатывает поступающие сообщения.
Работает в качестве отдельного потока.
'''
port = Port()
def __init__(self, listen_address, listen_port, database):
# Параментры подключения
self.addr = listen_address
self.port = listen_port
# База данных сервера
self.database = database
# Сокет, через который будет осуществляться работа
self.sock = None
# Список подключённых клиентов.
self.clients = []
# Сокеты
self.listen_sockets = None
self.error_sockets = None
# Флаг продолжения работы
self.running = True
# Словарь содержащий сопоставленные имена и соответствующие им сокеты.
self.names = dict()
# Конструктор предка
super().__init__()
def run(self):
'''Метод основной цикл потока.'''
# Инициализация Сокета
self.init_socket()
# Основной цикл программы сервера
while self.running:
# Ждём подключения, если таймаут вышел, ловим исключение.
try:
client, client_address = self.sock.accept()
except OSError:
pass
else:
logger.info(f'Установлено соедение с ПК {client_address}')
client.settimeout(5)
self.clients.append(client)
recv_data_lst = []
send_data_lst = []
err_lst = []
# Проверяем на наличие ждущих клиентов
try:
if self.clients:
recv_data_lst, self.listen_sockets, self.error_sockets = select.select(
self.clients, self.clients, [], 0)
except OSError as err:
logger.error(f'Ошибка работы с сокетами: {err.errno}')
# принимаем сообщения и если ошибка, исключаем клиента.
if recv_data_lst:
for client_with_message in recv_data_lst:
try:
self.process_client_message(
get_message(client_with_message), client_with_message)
except (OSError, json.JSONDecodeError, TypeError):
self.remove_client(client_with_message)
def remove_client(self, client):
'''
Метод обработчик клиента с которым прервана связь.
Ищет клиента и удаляет его из списков и базы:
'''
logger.info(f'Клиент {client.getpeername()} отключился от сервера.')
for name in self.names:
if self.names[name] == client:
self.database.user_logout(name)
del self.names[name]
break
self.clients.remove(client)
client.close()
def init_socket(self):
'''Метод инициализатор сокета.'''
logger.info(
f'Запущен сервер, порт для подключений: {self.port} , адрес с которого принимаются подключения: {self.addr}. Если адрес не указан, принимаются соединения с любых адресов.')
# Готовим сокет
transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.bind((self.addr, self.port))
transport.settimeout(0.5)
# Начинаем слушать сокет.
self.sock = transport
self.sock.listen(MAX_CONNECTIONS)
def process_message(self, message):
'''
Метод отправки сообщения клиенту.
'''
if message[DESTINATION] in self.names and self.names[message[DESTINATION]
] in self.listen_sockets:
try:
send_message(self.names[message[DESTINATION]], message)
logger.info(
f'Отправлено сообщение пользователю {message[DESTINATION]} от пользователя {message[SENDER]}.')
except OSError:
self.remove_client(message[DESTINATION])
elif message[DESTINATION] in self.names and self.names[message[DESTINATION]] not in self.listen_sockets:
logger.error(
f'Связь с клиентом {message[DESTINATION]} была потеряна. Соединение закрыто, доставка невозможна.')
self.remove_client(self.names[message[DESTINATION]])
else:
logger.error(
f'Пользователь {message[DESTINATION]} не зарегистрирован на сервере, отправка сообщения невозможна.')
@login_required
def process_client_message(self, message, client):
'''Метод отбработчик поступающих сообщений.'''
logger.debug(f'Разбор сообщения от клиента : {message}')
# Если это сообщение о присутствии, принимаем и отвечаем
if ACTION in message and message[ACTION] == PRESENCE and TIME in message and USER in message:
# Если сообщение о присутствии то вызываем функцию авторизации.
self.autorize_user(message, client)
# Если это сообщение, то отправляем его получателю.
elif ACTION in message and message[ACTION] == MESSAGE and DESTINATION in message and TIME in message \
and SENDER in message and MESSAGE_TEXT in message and self.names[message[SENDER]] == client:
if message[DESTINATION] in self.names:
self.database.process_message(
message[SENDER], message[DESTINATION])
self.process_message(message)
try:
send_message(client, RESPONSE_200)
except OSError:
self.remove_client(client)
else:
response = RESPONSE_400
response[ERROR] = 'Пользователь не зарегистрирован на сервере.'
try:
send_message(client, response)
except OSError:
pass
return
# Если клиент выходит
elif ACTION in message and message[ACTION] == EXIT and ACCOUNT_NAME in message \
and self.names[message[ACCOUNT_NAME]] == client:
self.remove_client(client)
# Если это запрос контакт-листа
elif ACTION in message and message[ACTION] == GET_CONTACTS and USER in message and \
self.names[message[USER]] == client:
response = RESPONSE_202
response[LIST_INFO] = self.database.get_contacts(message[USER])
try:
send_message(client, response)
except OSError:
self.remove_client(client)
# Если это добавление контакта
elif ACTION in message and message[ACTION] == ADD_CONTACT and ACCOUNT_NAME in message and USER in message \
and self.names[message[USER]] == client:
self.database.add_contact(message[USER], message[ACCOUNT_NAME])
try:
send_message(client, RESPONSE_200)
except OSError:
self.remove_client(client)
# Если это удаление контакта
elif ACTION in message and message[ACTION] == REMOVE_CONTACT and ACCOUNT_NAME in message and USER in message \
and self.names[message[USER]] == client:
self.database.remove_contact(message[USER], message[ACCOUNT_NAME])
try:
send_message(client, RESPONSE_200)
except OSError:
self.remove_client(client)
# Если это запрос известных пользователей
elif ACTION in message and message[ACTION] == USERS_REQUEST and ACCOUNT_NAME in message \
and self.names[message[ACCOUNT_NAME]] == client:
response = RESPONSE_202
response[LIST_INFO] = [user[0]
for user in self.database.users_list()]
try:
send_message(client, response)
except OSError:
self.remove_client(client)
# Если это запрос публичного ключа пользователя
elif ACTION in message and message[ACTION] == PUBLIC_KEY_REQUEST and ACCOUNT_NAME in message:
response = RESPONSE_511
response[DATA] = self.database.get_pubkey(message[ACCOUNT_NAME])
# может быть, что ключа ещё нет (пользователь никогда не логинился,
# тогда шлём 400)
if response[DATA]:
try:
send_message(client, response)
except OSError:
self.remove_client(client)
else:
response = RESPONSE_400
response[ERROR] = 'Нет публичного ключа для данного пользователя'
try:
send_message(client, response)
except OSError:
self.remove_client(client)
# Иначе отдаём Bad request
else:
response = RESPONSE_400
response[ERROR] = 'Запрос некорректен.'
try:
send_message(client, response)
except OSError:
self.remove_client(client)
def autorize_user(self, message, sock):
'''Метод реализующий авторизцию пользователей.'''
# Если имя пользователя уже занято то возвращаем 400
if message[USER][ACCOUNT_NAME] in self.names.keys():
response = RESPONSE_400
response[ERROR] = 'Имя пользователя уже занято.'
try:
send_message(sock, response)
except OSError:
pass
self.clients.remove(sock)
sock.close()
# Проверяем что пользователь зарегистрирован на сервере.
elif not self.database.check_user(message[USER][ACCOUNT_NAME]):
response = RESPONSE_400
response[ERROR] = 'Пользователь не зарегистрирован.'
try:
send_message(sock, response)
except OSError:
pass
self.clients.remove(sock)
sock.close()
else:
# Иначе отвечаем 511 и проводим процедуру авторизации
# Словарь - заготовка
message_auth = RESPONSE_511
# Набор байтов в hex представлении
random_str = binascii.hexlify(os.urandom(64))
# В словарь байты нельзя, декодируем (json.dumps -> TypeError)
message_auth[DATA] = random_str.decode('ascii')
# Создаём хэш пароля и связки с рандомной строкой, сохраняем
# серверную версию ключа
hash = hmac.new(
self.database.get_hash(
message[USER][ACCOUNT_NAME]),
random_str)
digest = hash.digest()
try:
# Обмен с клиентом
send_message(sock, message_auth)
ans = get_message(sock)
except OSError:
sock.close()
return
client_digest = binascii.a2b_base64(ans[DATA])
# Если ответ клиента корректный, то сохраняем его в список
# пользователей.
if RESPONSE in ans and ans[RESPONSE] == 511 and hmac.compare_digest(
digest, client_digest):
self.names[message[USER][ACCOUNT_NAME]] = sock
client_ip, client_port = sock.getpeername()
try:
send_message(sock, RESPONSE_200)
except OSError:
self.remove_client(message[USER][ACCOUNT_NAME])
# добавляем пользователя в список активных и если у него изменился открытый ключ
# сохраняем новый
self.database.user_login(
message[USER][ACCOUNT_NAME],
client_ip,
client_port,
message[USER][PUBLIC_KEY])
else:
response = RESPONSE_400
response[ERROR] = 'Неверный пароль.'
try:
send_message(sock, response)
except OSError:
pass
self.clients.remove(sock)
sock.close()
def service_update_lists(self):
'''Метод реализующий отправки сервисного сообщения 205 клиентам.'''
for client in self.names:
try:
send_message(self.names[client], RESPONSE_205)
except OSError:
self.remove_client(self.names[client]) | ASPER_Messenger_Server | /ASPER_Messenger_Server-0.11.1.tar.gz/ASPER_Messenger_Server-0.11.1/server/server/core.py | core.py |
from PyQt5.QtWidgets import QDialog, QPushButton, QLineEdit, QApplication, QLabel, QMessageBox
from PyQt5.QtCore import Qt
import hashlib
import binascii
class RegisterUser(QDialog):
'''Класс диалог регистрации пользователя на сервере.'''
def __init__(self, database, server):
super().__init__()
self.database = database
self.server = server
self.setWindowTitle('Регистрация')
self.setFixedSize(175, 183)
self.setModal(True)
self.setAttribute(Qt.WA_DeleteOnClose)
self.label_username = QLabel('Введите имя пользователя:', self)
self.label_username.move(10, 10)
self.label_username.setFixedSize(150, 15)
self.client_name = QLineEdit(self)
self.client_name.setFixedSize(154, 20)
self.client_name.move(10, 30)
self.label_passwd = QLabel('Введите пароль:', self)
self.label_passwd.move(10, 55)
self.label_passwd.setFixedSize(150, 15)
self.client_passwd = QLineEdit(self)
self.client_passwd.setFixedSize(154, 20)
self.client_passwd.move(10, 75)
self.client_passwd.setEchoMode(QLineEdit.Password)
self.label_conf = QLabel('Введите подтверждение:', self)
self.label_conf.move(10, 100)
self.label_conf.setFixedSize(150, 15)
self.client_conf = QLineEdit(self)
self.client_conf.setFixedSize(154, 20)
self.client_conf.move(10, 120)
self.client_conf.setEchoMode(QLineEdit.Password)
self.btn_ok = QPushButton('Сохранить', self)
self.btn_ok.move(10, 150)
self.btn_ok.clicked.connect(self.save_data)
self.btn_cancel = QPushButton('Выход', self)
self.btn_cancel.move(90, 150)
self.btn_cancel.clicked.connect(self.close)
self.messages = QMessageBox()
self.show()
def save_data(self):
'''
Метод проверки правильности ввода и сохранения в базу нового пользователя.
'''
if not self.client_name.text():
self.messages.critical(
self, 'Ошибка', 'Не указано имя пользователя.')
return
elif self.client_passwd.text() != self.client_conf.text():
self.messages.critical(
self, 'Ошибка', 'Введённые пароли не совпадают.')
return
elif self.database.check_user(self.client_name.text()):
self.messages.critical(
self, 'Ошибка', 'Пользователь уже существует.')
return
else:
# Генерируем хэш пароля, в качестве соли будем использовать логин в
# нижнем регистре.
passwd_bytes = self.client_passwd.text().encode('utf-8')
salt = self.client_name.text().lower().encode('utf-8')
passwd_hash = hashlib.pbkdf2_hmac(
'sha512', passwd_bytes, salt, 10000)
self.database.add_user(
self.client_name.text(),
binascii.hexlify(passwd_hash))
self.messages.information(
self, 'Успех', 'Пользователь успешно зарегистрирован.')
# Рассылаем клиентам сообщение о необходимости обновить справичники
self.server.service_update_lists()
self.close()
if __name__ == '__main__':
app = QApplication([])
app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
dial = RegisterUser(None)
app.exec_() | ASPER_Messenger_Server | /ASPER_Messenger_Server-0.11.1.tar.gz/ASPER_Messenger_Server-0.11.1/server/server/add_user.py | add_user.py |
from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QApplication, QLabel, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import QTimer
from server.stat_window import StatWindow
from server.config_window import ConfigWindow
from server.add_user import RegisterUser
from server.remove_user import DelUserDialog
class MainWindow(QMainWindow):
'''Класс - основное окно сервера.'''
def __init__(self, database, server, config):
# Конструктор предка
super().__init__()
# База данных сервера
self.database = database
self.server_thread = server
self.config = config
# Ярлык выхода
self.exitAction = QAction('Выход', self)
self.exitAction.setShortcut('Ctrl+Q')
self.exitAction.triggered.connect(qApp.quit)
# Кнопка обновить список клиентов
self.refresh_button = QAction('Обновить список', self)
# Кнопка настроек сервера
self.config_btn = QAction('Настройки сервера', self)
# Кнопка регистрации пользователя
self.register_btn = QAction('Регистрация пользователя', self)
# Кнопка удаления пользователя
self.remove_btn = QAction('Удаление пользователя', self)
# Кнопка вывести историю сообщений
self.show_history_button = QAction('История клиентов', self)
# Статусбар
self.statusBar()
self.statusBar().showMessage('Server Working')
# Тулбар
self.toolbar = self.addToolBar('MainBar')
self.toolbar.addAction(self.exitAction)
self.toolbar.addAction(self.refresh_button)
self.toolbar.addAction(self.show_history_button)
self.toolbar.addAction(self.config_btn)
self.toolbar.addAction(self.register_btn)
self.toolbar.addAction(self.remove_btn)
# Настройки геометрии основного окна
# Поскольку работать с динамическими размерами мы не умеем, и мало
# времени на изучение, размер окна фиксирован.
self.setFixedSize(800, 600)
self.setWindowTitle('Messaging Server alpha release')
# Надпись о том, что ниже список подключённых клиентов
self.label = QLabel('Список подключённых клиентов:', self)
self.label.setFixedSize(240, 15)
self.label.move(10, 25)
# Окно со списком подключённых клиентов.
self.active_clients_table = QTableView(self)
self.active_clients_table.move(10, 45)
self.active_clients_table.setFixedSize(780, 400)
# Таймер, обновляющий список клиентов 1 раз в секунду
self.timer = QTimer()
self.timer.timeout.connect(self.create_users_model)
self.timer.start(1000)
# Связываем кнопки с процедурами
self.refresh_button.triggered.connect(self.create_users_model)
self.show_history_button.triggered.connect(self.show_statistics)
self.config_btn.triggered.connect(self.server_config)
self.register_btn.triggered.connect(self.reg_user)
self.remove_btn.triggered.connect(self.rem_user)
# Последним параметром отображаем окно.
self.show()
def create_users_model(self):
'''Метод заполняющий таблицу активных пользователей.'''
list_users = self.database.active_users_list()
list = QStandardItemModel()
list.setHorizontalHeaderLabels(
['Имя Клиента', 'IP Адрес', 'Порт', 'Время подключения'])
for row in list_users:
user, ip, port, time = row
user = QStandardItem(user)
user.setEditable(False)
ip = QStandardItem(ip)
ip.setEditable(False)
port = QStandardItem(str(port))
port.setEditable(False)
# Уберём милисекунды из строки времени, т.к. такая точность не
# требуется.
time = QStandardItem(str(time.replace(microsecond=0)))
time.setEditable(False)
list.appendRow([user, ip, port, time])
self.active_clients_table.setModel(list)
self.active_clients_table.resizeColumnsToContents()
self.active_clients_table.resizeRowsToContents()
def show_statistics(self):
'''Метод создающий окно со статистикой клиентов.'''
global stat_window
stat_window = StatWindow(self.database)
stat_window.show()
def server_config(self):
'''Метод создающий окно с настройками сервера.'''
global config_window
# Создаём окно и заносим в него текущие параметры
config_window = ConfigWindow(self.config)
def reg_user(self):
'''Метод создающий окно регистрации пользователя.'''
global reg_window
reg_window = RegisterUser(self.database, self.server_thread)
reg_window.show()
def rem_user(self):
'''Метод создающий окно удаления пользователя.'''
global rem_window
rem_window = DelUserDialog(self.database, self.server_thread)
rem_window.show() | ASPER_Messenger_Server | /ASPER_Messenger_Server-0.11.1.tar.gz/ASPER_Messenger_Server-0.11.1/server/server/main_window.py | main_window.py |
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey, DateTime, Text
from sqlalchemy.orm import mapper, sessionmaker
from sqlalchemy.sql import default_comparator
import datetime
class ServerStorage:
'''
Класс - оболочка для работы с базой данных сервера.
Использует SQLite базу данных, реализован с помощью
SQLAlchemy ORM и используется классический подход.
'''
class AllUsers:
'''Класс - отображение таблицы всех пользователей.'''
def __init__(self, username, passwd_hash):
self.name = username
self.last_login = datetime.datetime.now()
self.passwd_hash = passwd_hash
self.pubkey = None
self.id = None
class ActiveUsers:
'''Класс - отображение таблицы активных пользователей.'''
def __init__(self, user_id, ip_address, port, login_time):
self.user = user_id
self.ip_address = ip_address
self.port = port
self.login_time = login_time
self.id = None
class LoginHistory:
'''Класс - отображение таблицы истории входов.'''
def __init__(self, name, date, ip, port):
self.id = None
self.name = name
self.date_time = date
self.ip = ip
self.port = port
class UsersContacts:
'''Класс - отображение таблицы контактов пользователей.'''
def __init__(self, user, contact):
self.id = None
self.user = user
self.contact = contact
class UsersHistory:
'''Класс - отображение таблицы истории действий.'''
def __init__(self, user):
self.id = None
self.user = user
self.sent = 0
self.accepted = 0
def __init__(self, path):
# Создаём движок базы данных
self.database_engine = create_engine(
f'sqlite:///{path}',
echo=False,
pool_recycle=7200,
connect_args={
'check_same_thread': False})
# Создаём объект MetaData
self.metadata = MetaData()
# Создаём таблицу пользователей
users_table = Table('Users', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', String, unique=True),
Column('last_login', DateTime),
Column('passwd_hash', String),
Column('pubkey', Text)
)
# Создаём таблицу активных пользователей
active_users_table = Table(
'Active_users', self.metadata, Column(
'id', Integer, primary_key=True), Column(
'user', ForeignKey('Users.id'), unique=True), Column(
'ip_address', String), Column(
'port', Integer), Column(
'login_time', DateTime))
# Создаём таблицу истории входов
user_login_history = Table('Login_history', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', ForeignKey('Users.id')),
Column('date_time', DateTime),
Column('ip', String),
Column('port', String)
)
# Создаём таблицу контактов пользователей
contacts = Table('Contacts', self.metadata,
Column('id', Integer, primary_key=True),
Column('user', ForeignKey('Users.id')),
Column('contact', ForeignKey('Users.id'))
)
# Создаём таблицу статистики пользователей
users_history_table = Table('History', self.metadata,
Column('id', Integer, primary_key=True),
Column('user', ForeignKey('Users.id')),
Column('sent', Integer),
Column('accepted', Integer)
)
# Создаём таблицы
self.metadata.create_all(self.database_engine)
# Создаём отображения
mapper(self.AllUsers, users_table)
mapper(self.ActiveUsers, active_users_table)
mapper(self.LoginHistory, user_login_history)
mapper(self.UsersContacts, contacts)
mapper(self.UsersHistory, users_history_table)
# Создаём сессию
Session = sessionmaker(bind=self.database_engine)
self.session = Session()
# Если в таблице активных пользователей есть записи, то их необходимо
# удалить
self.session.query(self.ActiveUsers).delete()
self.session.commit()
def user_login(self, username, ip_address, port, key):
'''
Метод выполняющийся при входе пользователя, записывает в базу факт входа
Обновляет открытый ключ пользователя при его изменении.
'''
# Запрос в таблицу пользователей на наличие там пользователя с таким
# именем
rez = self.session.query(self.AllUsers).filter_by(name=username)
# Если имя пользователя уже присутствует в таблице, обновляем время последнего входа
# и проверяем корректность ключа. Если клиент прислал новый ключ,
# сохраняем его.
if rez.count():
user = rez.first()
user.last_login = datetime.datetime.now()
if user.pubkey != key:
user.pubkey = key
# Если нету, то генерируем исключение
else:
raise ValueError('Пользователь не зарегистрирован.')
# Теперь можно создать запись в таблицу активных пользователей о факте
# входа.
new_active_user = self.ActiveUsers(
user.id, ip_address, port, datetime.datetime.now())
self.session.add(new_active_user)
# и сохранить в историю входов
history = self.LoginHistory(
user.id, datetime.datetime.now(), ip_address, port)
self.session.add(history)
# Сохрраняем изменения
self.session.commit()
def add_user(self, name, passwd_hash):
'''
Метод регистрации пользователя.
Принимает имя и хэш пароля, создаёт запись в таблице статистики.
'''
user_row = self.AllUsers(name, passwd_hash)
self.session.add(user_row)
self.session.commit()
history_row = self.UsersHistory(user_row.id)
self.session.add(history_row)
self.session.commit()
def remove_user(self, name):
'''Метод удаляющий пользователя из базы.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
self.session.query(self.LoginHistory).filter_by(name=user.id).delete()
self.session.query(self.UsersContacts).filter_by(user=user.id).delete()
self.session.query(
self.UsersContacts).filter_by(
contact=user.id).delete()
self.session.query(self.UsersHistory).filter_by(user=user.id).delete()
self.session.query(self.AllUsers).filter_by(name=name).delete()
self.session.commit()
def get_hash(self, name):
'''Метод получения хэша пароля пользователя.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
return user.passwd_hash
def get_pubkey(self, name):
'''Метод получения публичного ключа пользователя.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
return user.pubkey
def check_user(self, name):
'''Метод проверяющий существование пользователя.'''
if self.session.query(self.AllUsers).filter_by(name=name).count():
return True
else:
return False
def user_logout(self, username):
'''Метод фиксирующий отключения пользователя.'''
# Запрашиваем пользователя, что покидает нас
user = self.session.query(
self.AllUsers).filter_by(
name=username).first()
# Удаляем его из таблицы активных пользователей.
self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
# Применяем изменения
self.session.commit()
def process_message(self, sender, recipient):
'''Метод записывающий в таблицу статистики факт передачи сообщения.'''
# Получаем ID отправителя и получателя
sender = self.session.query(
self.AllUsers).filter_by(
name=sender).first().id
recipient = self.session.query(
self.AllUsers).filter_by(
name=recipient).first().id
# Запрашиваем строки из истории и увеличиваем счётчики
sender_row = self.session.query(
self.UsersHistory).filter_by(
user=sender).first()
sender_row.sent += 1
recipient_row = self.session.query(
self.UsersHistory).filter_by(
user=recipient).first()
recipient_row.accepted += 1
self.session.commit()
def add_contact(self, user, contact):
'''Метод добавления контакта для пользователя.'''
# Получаем ID пользователей
user = self.session.query(self.AllUsers).filter_by(name=user).first()
contact = self.session.query(
self.AllUsers).filter_by(
name=contact).first()
# Проверяем что не дубль и что контакт может существовать (полю
# пользователь мы доверяем)
if not contact or self.session.query(
self.UsersContacts).filter_by(
user=user.id,
contact=contact.id).count():
return
# Создаём объект и заносим его в базу
contact_row = self.UsersContacts(user.id, contact.id)
self.session.add(contact_row)
self.session.commit()
# Функция удаляет контакт из базы данных
def remove_contact(self, user, contact):
'''Метод удаления контакта пользователя.'''
# Получаем ID пользователей
user = self.session.query(self.AllUsers).filter_by(name=user).first()
contact = self.session.query(
self.AllUsers).filter_by(
name=contact).first()
# Проверяем что контакт может существовать (полю пользователь мы
# доверяем)
if not contact:
return
# Удаляем требуемое
self.session.query(self.UsersContacts).filter(
self.UsersContacts.user == user.id,
self.UsersContacts.contact == contact.id
).delete()
self.session.commit()
def users_list(self):
'''Метод возвращающий список известных пользователей со временем последнего входа.'''
# Запрос строк таблицы пользователей.
query = self.session.query(
self.AllUsers.name,
self.AllUsers.last_login
)
# Возвращаем список кортежей
return query.all()
def active_users_list(self):
'''Метод возвращающий список активных пользователей.'''
# Запрашиваем соединение таблиц и собираем кортежи имя, адрес, порт,
# время.
query = self.session.query(
self.AllUsers.name,
self.ActiveUsers.ip_address,
self.ActiveUsers.port,
self.ActiveUsers.login_time
).join(self.AllUsers)
# Возвращаем список кортежей
return query.all()
def login_history(self, username=None):
'''Метод возвращающий историю входов.'''
# Запрашиваем историю входа
query = self.session.query(self.AllUsers.name,
self.LoginHistory.date_time,
self.LoginHistory.ip,
self.LoginHistory.port
).join(self.AllUsers)
# Если было указано имя пользователя, то фильтруем по нему
if username:
query = query.filter(self.AllUsers.name == username)
# Возвращаем список кортежей
return query.all()
def get_contacts(self, username):
'''Метод возвращающий список контактов пользователя.'''
# Запрашивааем указанного пользователя
user = self.session.query(self.AllUsers).filter_by(name=username).one()
# Запрашиваем его список контактов
query = self.session.query(self.UsersContacts, self.AllUsers.name). \
filter_by(user=user.id). \
join(self.AllUsers, self.UsersContacts.contact == self.AllUsers.id)
# выбираем только имена пользователей и возвращаем их.
return [contact[1] for contact in query.all()]
def message_history(self):
'''Метод возвращающий статистику сообщений.'''
query = self.session.query(
self.AllUsers.name,
self.AllUsers.last_login,
self.UsersHistory.sent,
self.UsersHistory.accepted
).join(self.AllUsers)
# Возвращаем список кортежей
return query.all()
# Отладка
if __name__ == '__main__':
test_db = ServerStorage('../server_database.db3')
test_db.user_login('test1', '192.168.1.113', 8080)
test_db.user_login('test2', '192.168.1.113', 8081)
print(test_db.users_list())
# print(test_db.active_users_list())
# test_db.user_logout('McG')
# print(test_db.login_history('re'))
# test_db.add_contact('test2', 'test1')
# test_db.add_contact('test1', 'test3')
# test_db.add_contact('test1', 'test6')
# test_db.remove_contact('test1', 'test3')
test_db.process_message('test1', 'test2')
print(test_db.message_history()) | ASPER_Messenger_Server | /ASPER_Messenger_Server-0.11.1.tar.gz/ASPER_Messenger_Server-0.11.1/server/server/database.py | database.py |
import socket
import logging
import logs.config_client_log
import logs.config_server_log
import sys
sys.path.append('../')
# метод определения модуля, источника запуска.
if sys.argv[0].find('client') == -1:
# если не клиент то сервер!
logger = logging.getLogger('server')
else:
# иначе сервер
logger = logging.getLogger('client')
def log(func_to_log):
'''
Декоратор, выполняющий логирование вызовов функций.
Сохраняет события типа debug, содержащие
информацию о имени вызываемой функиции, параметры с которыми
вызывается функция, и модуль, вызывающий функцию.
'''
def log_saver(*args, **kwargs):
logger.debug(
f'Была вызвана функция {func_to_log.__name__} c параметрами {args} , {kwargs}. Вызов из модуля {func_to_log.__module__}')
ret = func_to_log(*args, **kwargs)
return ret
return log_saver
def login_required(func):
'''
Декоратор, проверяющий, что клиент авторизован на сервере.
Проверяет, что передаваемый объект сокета находится в
списке авторизованных клиентов.
За исключением передачи словаря-запроса
на авторизацию. Если клиент не авторизован,
генерирует исключение TypeError
'''
def checker(*args, **kwargs):
# проверяем, что первый аргумент - экземпляр MessageProcessor
# Импортить необходимо тут, иначе ошибка рекурсивного импорта.
from server.core import MessageProcessor
from common.variables import ACTION, PRESENCE
if isinstance(args[0], MessageProcessor):
found = False
for arg in args:
if isinstance(arg, socket.socket):
# Проверяем, что данный сокет есть в списке names класса
# MessageProcessor
for client in args[0].names:
if args[0].names[client] == arg:
found = True
# Теперь надо проверить, что передаваемые аргументы не presence
# сообщение. Если presense, то разрешаем
for arg in args:
if isinstance(arg, dict):
if ACTION in arg and arg[ACTION] == PRESENCE:
found = True
# Если не не авторизован и не сообщение начала авторизации, то
# вызываем исключение.
if not found:
raise TypeError
return func(*args, **kwargs)
return checker | ASPER_Messenger_Server | /ASPER_Messenger_Server-0.11.1.tar.gz/ASPER_Messenger_Server-0.11.1/server/common/decos.py | decos.py |
import dis
class ServerMaker(type):
'''
Метакласс, проверяющий что в результирующем классе нет клиентских
вызовов таких как: connect. Также проверяется, что серверный
сокет является TCP и работает по IPv4 протоколу.
'''
def __init__(cls, clsname, bases, clsdict):
# Список методов, которые используются в функциях класса:
methods = []
# Атрибуты, вызываемые функциями классов
attrs = []
for func in clsdict:
# Пробуем
try:
ret = dis.get_instructions(clsdict[func])
# Если не функция то ловим исключение
except TypeError:
pass
else:
# Раз функция разбираем код, получая используемые методы и
# атрибуты.
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
elif i.opname == 'LOAD_ATTR':
if i.argval not in attrs:
attrs.append(i.argval)
# Если обнаружено использование недопустимого метода connect,
# генерируем исключение:
if 'connect' in methods:
raise TypeError(
'Использование метода connect недопустимо в серверном классе')
# Если сокет не инициализировался константами SOCK_STREAM(TCP)
# AF_INET(IPv4), тоже исключение.
if not ('SOCK_STREAM' in attrs and 'AF_INET' in attrs):
raise TypeError('Некорректная инициализация сокета.')
super().__init__(clsname, bases, clsdict)
class ClientMaker(type):
'''
Метакласс, проверяющий что в результирующем классе нет серверных
вызовов таких как: accept, listen. Также проверяется, что сокет не
создаётся внутри конструктора класса.
'''
def __init__(cls, clsname, bases, clsdict):
# Список методов, которые используются в функциях класса:
methods = []
for func in clsdict:
# Пробуем
try:
ret = dis.get_instructions(clsdict[func])
# Если не функция то ловим исключение
except TypeError:
pass
else:
# Раз функция разбираем код, получая используемые методы.
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
# Если обнаружено использование недопустимого метода accept, listen,
# socket бросаем исключение:
for command in ('accept', 'listen', 'socket'):
if command in methods:
raise TypeError(
'В классе обнаружено использование запрещённого метода')
# Вызов get_message или send_message из utils считаем корректным
# использованием сокетов
if 'get_message' in methods or 'send_message' in methods:
pass
else:
raise TypeError(
'Отсутствуют вызовы функций, работающих с сокетами.')
super().__init__(clsname, bases, clsdict) | ASPER_Messenger_Server | /ASPER_Messenger_Server-0.11.1.tar.gz/ASPER_Messenger_Server-0.11.1/server/common/metaclasses.py | metaclasses.py |
import Levenshtein as Lev
def calculate_cer(s1, s2):
"""
Computes the Character Error Rate, defined as the edit distance.
Arguments:
s1 (string): space-separated sentence (actual)
s2 (string): space-separated sentence (predicted)
"""
s1 = s1.replace(' ', '')
s2 = s2.replace(' ', '')
return Lev.distance(s1, s2)/len(s1)
def calculate_wer(s1, s2):
"""
Computes the Word Error Rate, defined as the edit distance between the
two provided sentences after tokenizing to words.
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
# build mapping of words to integers
b = set(s1.split() + s2.split())
word2char = dict(zip(b, range(len(b))))
# map the words to a char array (Levenshtein packages only accepts
# strings)
w1 = [chr(word2char[w]) for w in s1.split()]
w2 = [chr(word2char[w]) for w in s2.split()]
return Lev.distance(''.join(w1), ''.join(w2)) / len(s1.split())
def calculate_cer_list_pair(results):
"""
Arguments:
results (list): list of ground truth and
predicted sequence pairs.
Returns the CER for the full set.
"""
dist = sum(Lev.distance(label, pred)
for label, pred in results)
total = sum(len(label) for label, _ in results)
return dist / total
def compute_wer_list_pair(results):
dist = []
total_len = []
for label, pred in results:
#print("".join(label))
dist.append(wer("".join(label), "".join(pred)))
total_len.append(len("".join(label).split()))
return sum(dist)/sum(total_len)
def wer(s1,s2):
"""
Computes the Word Error Rate, defined as the edit distance between the
two provided sentences after tokenizing to words.
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
# build mapping of words to integers
b = set(s1.split() + s2.split())
word2char = dict(zip(b, range(len(b))))
# map the words to a char array (Levenshtein packages only accepts
# strings)
w1 = [chr(word2char[w]) for w in s1.split()]
w2 = [chr(word2char[w]) for w in s2.split()]
return Lev.distance(''.join(w1), ''.join(w2)) | ASR-metrics | /ASR_metrics-1.0.12-py3-none-any.whl/ASR_metrics/utils.py | utils.py |
# Imports required by the functions below. `ap` (used for ap.download_url and
# ap.DownloadProgressBar in download_tiles_of_verified_images) is assumed to be a
# project-local download-helper module; its import is not shown in this excerpt.
import os
import shutil
from glob import glob

import numpy as np
import tqdm


def remove_thumbs(path_to_folder_containing_images):
    """ Remove Thumbs.db files from a given folder
    Args:
        path_to_folder_containing_images (str): path to folder containing images
    """
    for db_path in glob(path_to_folder_containing_images + "/*.db", recursive=True):
        os.remove(db_path)
def remove_thumbs_all_positive_chips(parent_directory):
""" Remove Thumbs.db file from all chips_positive folders in parent directory
Args:
parent_directory (str): path to parent directory
"""
for r, d, f in os.walk(parent_directory):
folder_name = os.path.basename(r) #identify folder name
if 'chips_positive' == folder_name: #Specify folders that contain positive chips
remove_thumbs(r)
########### Extract information from tile_names_tile urls numpy arrays ##########
def add_formatted_and_standard_tile_names_to_tile_names_time_urls(tile_names_tile_urls):
    """Append the standard and formatted tile names to an array of (tile name, tile url) rows."""
    # get a list of the standard and formatted tile names
    tile_names = []
    for row in tile_names_tile_urls:
        url_parts = row[1].rsplit("/", 3)
        # get the quad standard tile name (file name without the extension)
        tile_name = url_parts[3]
        tile_name = os.path.splitext(tile_name)[0]
        # format tile names to only include the initial capture date (1/20)
        if tile_name.count("_") > 5:
            tile_name = tile_name.rsplit("_", 1)[0]
        # get the formatted tile name (1/14/2022)
        tile_name_formatted = url_parts[1] + "_" + url_parts[2] + "_" + url_parts[3]
        tile_name_formatted = os.path.splitext(tile_name_formatted)[0]
        tile_names.append([tile_name, tile_name_formatted])
    # create an array that contains the standard and formatted tile names
    tile_names_tile_urls_formatted_tile_names = np.hstack((tile_names_tile_urls, np.array(tile_names)))
    return(tile_names_tile_urls_formatted_tile_names)
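# Illustrative example of the naming convention (the url below is hypothetical, following the
# usual NAIP blob layout; it is not taken from the project data):
#   url = ".../al_60cm_2019/30085/m_3008501_ne_16_1_20190829.tif"
#   standard tile name  -> "m_3008501_ne_16_1_20190829" (a trailing secondary date, if any, is stripped)
#   formatted tile name -> "al_60cm_2019_30085_m_3008501_ne_16_1_20190829"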
def unique_formatted_standard_tile_names(tile_names_tile_urls_complete_array):
    """Deduplicate the complete array by formatted tile name and by standard tile name, returning both subsets."""
    unique_tile_name_formatted, indices = np.unique(tile_names_tile_urls_complete_array[:,3], return_index = True)
    tile_names_tile_urls_complete_array_unique_formatted_tile_names = tile_names_tile_urls_complete_array[indices,:]
    print("unique formatted tile names", tile_names_tile_urls_complete_array_unique_formatted_tile_names.shape)
    unique_tile_name_standard, indices = np.unique(tile_names_tile_urls_complete_array[:,2], return_index = True)
    tile_names_tile_urls_complete_array_unique_standard_tile_names = tile_names_tile_urls_complete_array[indices,:]
    print("unique standard tile names", tile_names_tile_urls_complete_array_unique_standard_tile_names.shape)
    return(tile_names_tile_urls_complete_array_unique_standard_tile_names, tile_names_tile_urls_complete_array_unique_formatted_tile_names)
################## Get tiles names or jpg names from jpg paths ####################
def jpg_path_to_tile_name_formatted(jpg_paths):
    """Get the formatted tile name (jpg name without the extension or chip index) for each jpg path."""
    tile_names = []
    for jpg_path in jpg_paths:
        base = os.path.basename(jpg_path)
        jpg = os.path.splitext(base)[0]  # name of the jpg with the extension removed
        tile_name_formatted = jpg.rsplit("_", 1)[0]  # drop the trailing chip index
        tile_names.append(tile_name_formatted)
    return(tile_names)
def jpg_path_to_jpg_name_formatted(jpg_paths):
    """Get the jpg file names, with and without the extension, for a list of jpg paths."""
    jpgs_ext = []
    jpgs_without_ext = []
    for jpg_path in jpg_paths:
        jpg_ext = os.path.basename(jpg_path)
        jpg_without_ext = os.path.splitext(jpg_ext)[0]  # name of the jpg with the extension removed
jpgs_ext.append(jpg_ext)
jpgs_without_ext.append(jpg_without_ext)
return(jpgs_ext, jpgs_without_ext)
def unique_positive_jpgs_from_parent_directory(parent_directory):
    """Walk a parent directory and return the unique positive jpgs as an array of [file name, path] rows."""
    files = []
    paths = []
    counter = 0
    # r=root, d=directories, f = files
# https://mkyong.com/python/python-how-to-list-all-files-in-a-directory/
for r, d, f in tqdm.tqdm(os.walk(parent_directory)):
folder_name = os.path.basename(r) #identify folder name
if 'chips_positive' == folder_name: #Specify folders that contain positive chips
for file in f:
if '.jpg' in file:
paths.append(os.path.join(r, file))
files.append(file)
counter += 1
positive_jpgs = np.array((files,paths)).T
unique_tile_name_formatted_positive_jpgs, indicies = np.unique(positive_jpgs[:,0], return_index = True)
unique_positive_jpgs = positive_jpgs[indicies]
print(unique_positive_jpgs.shape)
return(unique_positive_jpgs)
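# Minimal usage sketch (the directory name below is a placeholder, not a path from this project):
#   unique_positive_jpgs = unique_positive_jpgs_from_parent_directory("verified_sets")
#   print(unique_positive_jpgs.shape)  # (n_unique_jpgs, 2) array of [file_name, full_path] rows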
## Processing Tiles
def move_tiles_of_verified_images_to_complete_dataset(tile_img_annotation, tiles_complete_dataset_path, path_to_verified_sets):
"""Move already downloaded tiles to completed dataset
"""
#obtain the paths of tifs in the verified sets
path_to_tifs_in_verified_sets = glob(path_to_verified_sets + "/**/*.tif", recursive = True)
print("Number of tifs to be moved", len(path_to_tifs_in_verified_sets))
#move verified tifs
for path in path_to_tifs_in_verified_sets:
base = os.path.basename(path)
tif = os.path.splitext(base)[0] #name of tif with the extension removed
if tif in tile_img_annotation[:,0]:
            shutil.move(path, os.path.join(tiles_complete_dataset_path, base))  # move tiles whose images have been verified into the complete dataset folder
def tiles_in_complete_dataset(tiles_complete_dataset_path):
    """Return arrays of the tile names, with and without the .tif extension, already in the complete dataset."""
    # Make a list of the tiles in the completed dataset
os.makedirs(tiles_complete_dataset_path, exist_ok=True)
tiles_downloaded = os.listdir(tiles_complete_dataset_path)
tiles_downloaded_with_ext_list = []
tiles_downloaded_without_ext_list = []
for tile in tiles_downloaded:
tiles_downloaded_with_ext_list.append(tile)
tiles_downloaded_without_ext_list.append(os.path.splitext(tile)[0]) #name of tif with the extension removed
return(np.array(tiles_downloaded_with_ext_list), np.array(tiles_downloaded_without_ext_list))
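# Usage sketch: the two returned arrays are aligned, so the same index gives the file name
# with and without the .tif extension (the path below is a placeholder):
#   with_ext, without_ext = tiles_in_complete_dataset("tiles_complete_dataset")
#   assert len(with_ext) == len(without_ext)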
def jpg_paths_to_tiles_without_ext(jpg_paths):
    """
    Determine which tiles correspond to jpgs (chips) that have been annotated.
    Args:
        jpg_paths (list): list of jpg paths
    Returns:
        tiles (numpy.ndarray): unique standard tile names corresponding to the jpg paths
    """
    tiles = []
    for path in jpg_paths:
        base = os.path.basename(path)
        img = os.path.splitext(base)[0]  # name of the jpg with the extension removed
        tile = img.rsplit("_", 1)[0]  # drop the trailing chip index
        tile = tile.split("_", 4)[4]  # keep only the standard tile name to remove duplicates
        tiles.append(tile)
    return(np.unique(tiles))
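# Illustrative example (hypothetical chip name following the formatted-name convention above):
#   "al_60cm_2019_30085_m_3008501_ne_16_1_20190829_000042.jpg"
#   -> rsplit("_", 1)[0] drops the trailing chip index
#   -> split("_", 4)[4] keeps the standard tile name "m_3008501_ne_16_1_20190829"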
##############################################################################################################################
################################### Chip Tiles #################################################
##############################################################################################################################
def tile_to_chip_array(tile, x, y, item_dim):
"""
##
x: col index
y: row index
"""
dimensions = tile.shape[2]
chip_img = tile[y*item_dim:y*item_dim+item_dim, x*(item_dim):x*(item_dim)+item_dim]
#add in back space if it is the edge of an image
if (chip_img.shape[0] != 512) & (chip_img.shape[1] != 512): #width
#print("Incorrect Width")
chip = np.zeros((512,512,dimensions), np.uint8)
chip[0:chip_img.shape[0], 0:chip_img.shape[1]] = chip_img
chip_img = chip
if chip_img.shape[0] != 512: #Height
black_height = 512 - chip_img.shape[0] #Height
black_width = 512 #- chip_img.shape[1] #width
black_img = np.zeros((black_height,black_width, dimensions), np.uint8)
chip_img = np.concatenate([chip_img, black_img])
if chip_img.shape[1] != 512: #width
black_height = 512 #- chip_img.shape[0] #Height
black_width = 512 - chip_img.shape[1] #width
black_img = np.zeros((black_height,black_width, dimensions), np.uint8)
chip_img = np.concatenate([chip_img, black_img],1)
return(chip_img)
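# Hedged usage sketch (not part of the original pipeline): chips one already-downloaded
# tile into 512 x 512 arrays with tile_to_chip_array above. The tile path is a placeholder
# and the snippet assumes this module's cv2/math imports.
def _example_chip_single_tile():
    """Chip a single tile into 512 x 512 arrays, padding edge chips with black."""
    tile = cv2.imread("path/to/tiles/example_tile.tif", cv2.IMREAD_UNCHANGED)  # placeholder path
    item_dim = 512
    rows = math.ceil(tile.shape[0] / item_dim)
    cols = math.ceil(tile.shape[1] / item_dim)
    return [tile_to_chip_array(tile, x, y, item_dim) for y in range(rows) for x in range(cols)]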
############## Download Tiles ##########################################################################################
def download_tiles_of_verified_images(positive_images_complete_dataset_path, tiles_complete_dataset_path, tiles_downloaded, tile_names_tile_urls_complete_array):
"""
Download the remaining tiles that correspond only to verified images.
Gather the locations of tiles that have already been downloaded and verified.
"""
# Make a list of the tiles moved to completed dataset
tiles_downloaded_with_ext, tiles_downloaded = tiles_in_complete_dataset(tiles_complete_dataset_path)
positive_jpg_paths = glob(positive_images_complete_dataset_path + "/*.jpg", recursive = True)
print("number of positive and verified images:", len(positive_jpg_paths))
# Determine which tiles corresponding to jpg that have been annotated #jpg_tiles
positive_jpg_tiles = jpg_paths_to_tiles_without_ext(positive_jpg_paths)
print("the number of tiles corresponding to verified images:", len(positive_jpg_tiles))
# Identify tiles that have not already been downloaded
tiles_to_download = []
for tile in positive_jpg_tiles: #index over the tiles corresponding to verified images
if tile not in tiles_downloaded: #check against the tiles that have already been downloaded
tiles_to_download.append(tile)
print("the number of tiles that need to be downloaded:", len(tiles_to_download))
# Download Tiles
tile_names = []
tile_urls = []
file_names = []
tile_names_without_year = []
for tile in tiles_to_download:
### download the tiles if they are not in the tiles folder
#check if the tile name is contained in the string of complete arrays
tile_name = [string for string in tile_names_tile_urls_complete_array[:,0] if tile in string]
if len(tile_name) == 1: #A single tile name # get tile url from the first (only) entry
tile_url = tile_names_tile_urls_complete_array[tile_names_tile_urls_complete_array[:,0]==tile_name[0]][0][1]
tile_names.append(tile_name[0])
tile_urls.append(tile_url)
elif len(np.unique(tile_name)) > 1: # Multiple different tile names; possibly the same tile in different states or different years
tile_url = tile_names_tile_urls_complete_array[tile_names_tile_urls_complete_array[:,0]==tile_name[0]][0][1]# get tile url
tile_names.append(tile_name[0])
tile_urls.append(tile_url)
elif (len(tile_name) > 1): #Multiple identical tile names, probably stored in different naip storage locations
# get tile url from the second entry
tile_url = tile_names_tile_urls_complete_array[tile_names_tile_urls_complete_array[:,0]==tile_name[1]][1][1]
tile_names.append(tile_name[1])
tile_urls.append(tile_url)
#get file name
file_name = tile_name[0]
if tile_name[0].count("_") > 5:
tile_name = tile_name[0].rsplit("_",1)[0]
file_name = tile_name + ".tif"
print(file_name)
### Download tile
file_names.append(ap.download_url(tile_url, tiles_complete_dataset_path,
destination_filename = file_name,
progress_updater=ap.DownloadProgressBar()))
#get the tile_names without the year
for file_name in file_names:
tile_names_without_year.append(file_name.rsplit("_",1)[0])
return(np.array((tile_names, tile_urls, file_names, tile_names_without_year)).T)
def downloaded_tifs_tile_names_tile_urls_file_names_tile_names_without_year(tile_path, tile_names_tile_urls_complete_array):
#remove thumbs
remove_thumbs(tile_path)
tif_paths = glob(tile_path + "/**/*.tif", recursive = True)
tile_names = []
tile_urls = []
file_names = []
tile_names_without_year = []
for path in tif_paths:
base = os.path.basename(path)
tile_name = os.path.splitext(base)[0] #name of tif with the extension removed
#check if the tile name is contained in the string of complete arrays
tile_name = [string for string in tile_names_tile_urls_complete_array[:,0] if tile_name in string]
if len(tile_name) == 1: #A single tile name # get tile url from the first (only) entry
tile_url = tile_names_tile_urls_complete_array[tile_names_tile_urls_complete_array[:,0]==tile_name[0]][0][1]
tile_names.append(tile_name[0])
tile_urls.append(tile_url)
elif len(np.unique(tile_name)) > 1: # Multiple different tile names; possibly the same tile in different states or different years
tile_url = tile_names_tile_urls_complete_array[tile_names_tile_urls_complete_array[:,0]==tile_name[0]][0][1]# get tile url
tile_names.append(tile_name[0])
tile_urls.append(tile_url)
elif (len(tile_name) > 1): #Multiple identical tile names, probably stored in different naip storage locations
# get tile url from the second entry
tile_url = tile_names_tile_urls_complete_array[tile_names_tile_urls_complete_array[:,0]==tile_name[1]][1][1]
tile_names.append(tile_name[1])
tile_urls.append(tile_url)
#get file name
file_name = tile_name[0]
if tile_name[0].count("_") > 5:
tile_name = tile_name[0].rsplit("_",1)[0]
file_name = tile_name + ".tif"
file_names.append(file_name)
### Download tile
#get the tile_names without the year
for file_name in file_names:
tile_names_without_year.append(file_name.rsplit("_",1)[0])
return(np.array((tile_names, tile_urls, file_names, tile_names_without_year)).T)
####### add chips to rechip folder ############################################################
def add_chips_to_chip_folders(rechipped_image_path, tile_name):
"""
Args:
rechipped_image_path (str): path to the folder that will contain all of the remaining images
that have not been labeled and correspond to tiles that have labeled images
tile_name (str): name of tile without the extension
"""
chips_path = os.path.join(rechipped_image_path, tile_name, "chips")
os.makedirs(chips_path, exist_ok=True)
item_dim = int(512)
tile = cv2.imread(os.path.join(tiles_complete_dataset_path, tile_name + ".tif"),cv2.IMREAD_UNCHANGED) #assumes tiles_complete_dataset_path is defined at module scope
tile_height, tile_width, tile_channels = tile.shape #the size of the tile
row_index = math.ceil(tile_height/512)
col_index = math.ceil(tile_width/512)
#print(row_index, col_index)
count = 1
for y in range(0, row_index): #rows
for x in range(0, col_index): #cols
chip_img = tile_to_chip_array(tile, x, y, item_dim)
#specify the chip names
chip_name_correct_chip_name = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" + '.jpg' # row/col indices are zero-padded two-digit numbers like '02'
if not os.path.exists(os.path.join(chips_path, chip_name_correct_chip_name)):
cv2.imwrite(os.path.join(chips_path, chip_name_correct_chip_name), chip_img) #save images | AST-data-eng | /AST_data_eng-0.0.5.tar.gz/AST_data_eng-0.0.5/AST_data_eng/processing_organizing_images_tiles.py | processing_organizing_images_tiles.py |
"""
Load Packages
"""
# Standard modules
import tempfile
import warnings
import urllib
import urllib.request
#import shutils
import shutil
import os
import os.path
from pathlib import Path
import sys
from zipfile import ZipFile
import pickle
import math
from contextlib import suppress
from glob import glob
import xml.dom.minidom
from xml.dom.minidom import parseString
import xml.etree.ElementTree as et
from xml.dom import minidom
import xml
# Less standard, but still pip- or conda-installable
import pandas as pd
import numpy as np
import progressbar # pip install progressbar2, not progressbar
from tqdm import tqdm
import cv2
# Image processing files
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import ConnectionPatch
import matplotlib.image as mpimg
#import rasterio
#from rasterio.windows import Window #
import re
import rtree
import shapely
from geopy.geocoders import Nominatim
import PIL
#print('PIL',PIL.__version__)
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
#Parsing/Modifying XML
from lxml.etree import Element,SubElement,tostring
import data_eng.form_calcs as fc
############################################################################################################
######################################### Azure Functions #############################################
############################################################################################################
class DownloadProgressBar():
"""
A progressbar to show the completed percentage and download speed for each image downloaded using urlretrieve.
https://stackoverflow.com/questions/37748105/how-to-use-progressbar-module-with-urlretrieve
"""
def __init__(self):
self.pbar = None
def __call__(self, block_num, block_size, total_size):
if not self.pbar:
self.pbar = progressbar.ProgressBar(max_value=total_size)
self.pbar.start()
downloaded = block_num * block_size
if downloaded < total_size:
self.pbar.update(downloaded)
else:
self.pbar.finish()
class NAIPTileIndex:
"""
Utility class for performing NAIP tile lookups by location.
"""
tile_rtree = None
tile_index = None
base_path = None
def __init__(self, base_path=None):
blob_root = 'https://naipeuwest.blob.core.windows.net/naip'
index_files = ["tile_index.dat", "tile_index.idx", "tiles.p"]
index_blob_root = re.sub('/naip$','/naip-index/rtree/', blob_root)
if base_path is None:
base_path = os.path.join(tempfile.gettempdir(),'naip')
os.makedirs(base_path,exist_ok=True)
for file_path in index_files:
download_url_no_destination_folder(index_blob_root + file_path, base_path + '/' + file_path,
progress_updater=DownloadProgressBar())
self.base_path = base_path
self.tile_rtree = rtree.index.Index(base_path + "/tile_index")
self.tile_index = pickle.load(open(base_path + "/tiles.p", "rb"))
def lookup_tile(self, lat, lon):
""""
Given a lat/lon coordinate pair, return the list of NAIP tiles that contain
that location.
Returns a list of COG file paths.
"""
point = shapely.geometry.Point(float(lon),float(lat))
intersected_indices = list(self.tile_rtree.intersection(point.bounds))
intersected_files = []
tile_intersection = False
for idx in intersected_indices:
intersected_file = self.tile_index[idx][0]
intersected_geom = self.tile_index[idx][1]
if intersected_geom.contains(point):
tile_intersection = True
intersected_files.append(intersected_file)
if not tile_intersection and len(intersected_indices) > 0:
print('''Error: there are overlaps with tile index,
but no tile completely contains selection''')
return None
elif len(intersected_files) <= 0:
print("No tile intersections")
return None
else:
return intersected_files
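# Hedged usage sketch: builds the rtree-backed NAIP tile index (downloading the index
# files on first use) and looks up the COG file paths covering a single point. The
# coordinates below are placeholders.
def _example_lookup_naip_tiles():
    """Return the NAIP COG file paths that contain a hypothetical lat/lon point."""
    index = NAIPTileIndex()
    return index.lookup_tile(36.00, -78.94)  # placeholder coordinates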
def download_url_no_destination_folder(url, destination_filename=None, progress_updater=None, force_download=False):
"""
Download a URL to a temporary file.
The URL-derived filename is not intended to guarantee uniqueness in general; it just
happens to be unique for this application.
"""
temp_dir = os.path.join(tempfile.gettempdir(),'naip')
os.makedirs(temp_dir,exist_ok=True)
if destination_filename is None:
url_as_filename = url.replace('://', '_').replace('/', '_')
destination_filename = \
os.path.join(temp_dir,url_as_filename)
if (not force_download) and (os.path.isfile(destination_filename)):
print('Bypassing download of already-downloaded file {}'.format(os.path.basename(url)))
return destination_filename
print('Downloading file {} to {}'.format(os.path.basename(url),destination_filename),end='')
urllib.request.urlretrieve(url, destination_filename, progress_updater)
assert(os.path.isfile(destination_filename))
nBytes = os.path.getsize(destination_filename)
print('...done, {} bytes.'.format(nBytes))
return destination_filename
def download_url(url, destination_folder, destination_filename=None, progress_updater=None, force_download=False):
"""
Download a URL to a file
Args:
url(str): url to download
destination_folder(str): directory to download folder
destination_filename(str): the name for each of files to download
return:
destination_filename
"""
# This is not intended to guarantee uniqueness, we just know it happens to guarantee
# uniqueness for this application.
if destination_filename is not None:
destination_filename = os.path.join(destination_folder, destination_filename)
if destination_filename is None:
url_as_filename = url.replace('://', '_').replace('/', '_')
destination_filename = os.path.join(destination_folder, url_as_filename)
if os.path.isfile(destination_filename):
print('Bypassing download of already-downloaded file {}'.format(os.path.basename(url)))
return destination_filename
# print('Downloading file {} to {}'.format(os.path.basename(url),destination_filename),end='')
urllib.request.urlretrieve(url, destination_filename, progress_updater)
assert(os.path.isfile(destination_filename))
nBytes = os.path.getsize(destination_filename)
print('...done, {} bytes.'.format(nBytes))
return destination_filename
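# Hedged usage sketch for download_url above: fetch one NAIP tile into a local folder
# with a progress bar. The URL and destination folder are placeholders.
def _example_download_single_tile():
    """Download a single (hypothetical) tile URL to a local tiles folder."""
    tile_url = "https://naipeuwest.blob.core.windows.net/naip/v002/..."  # placeholder URL
    return download_url(tile_url, "path/to/tiles",
                        destination_filename="example_tile.tif",
                        progress_updater=DownloadProgressBar())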
def display_naip_tile(filename):
"""
Display a NAIP tile using rasterio.
"""
dsfactor = 10
with rasterio.open(filename) as raster:
# NAIP imagery has four channels: R, G, B, IR
# Stack RGB channels into an image; we won't try to render the IR channel
# rasterio uses 1-based indexing for channels.
h = int(raster.height/dsfactor)
w = int(raster.width/dsfactor)
print('Resampling to {},{}'.format(h,w))
r = raster.read(1, out_shape=(1, h, w))
g = raster.read(2, out_shape=(1, h, w))
b = raster.read(3, out_shape=(1, h, w))
rgb = np.dstack((r,g,b))
fig = plt.figure(figsize=(7.5, 7.5), dpi=100, edgecolor='k')
plt.imshow(rgb)
raster.close()
def get_coordinates_from_address(address):
"""
Look up the lat/lon coordinates for an address.
"""
geolocator = Nominatim(user_agent="NAIP")
location = geolocator.geocode(address)
print('Retrieving location for address:\n{}'.format(location.address))
return location.latitude, location.longitude
"""
# Functions to retrieve file pathways from EIA HFID datasources
"""
# %%
def lons_lat_to_filepaths(lons, lats, index):
"""
Calculate NAIP file paths given lists of longitudes and latitudes
"""
all_paths = np.empty(shape=(1,8))
for i in tqdm(range(len(lons))):
naip_file_pathways = index.lookup_tile(lats[i], lons[i])
if naip_file_pathways != None:
select_path = []
for ii in range(len(naip_file_pathways)):
tmp = naip_file_pathways[ii].split('/')
tmp = np.hstack((tmp, naip_file_pathways[ii].split('/')[3].split("_")[1]))
iii = iter(tmp[5].split("_",4))
tmp = np.hstack((tmp, list((map("_".join,zip(*[iii]*4)) ))))
select_path.append(tmp)
select_path = np.array(select_path)
select_path = select_path[select_path[:,2] >= "2018"] #filter out years to get the most recent data that will include the highest resolution data
select_path = select_path[(select_path[:,6] == "60cm") | (select_path[:,6] == "060cm")] #select only pathways with 60cm
all_paths = np.vstack((all_paths, select_path)) #add to the rest of the paths
file_pathways = np.delete(all_paths, 0, axis=0)
file_pathways = np.unique(file_pathways, axis=0) #select unique values
return file_pathways
def filepaths_to_tile_name_tile_url(file_pathways):
"""
Determine the tile name and url for a given file pathway
"""
tile_name = []
tile_url = []
#blob_root = 'https://naipblobs.blob.core.windows.net/naip'
blob_root = 'https://naipeuwest.blob.core.windows.net/naip'
for i in range(len(file_pathways)):
tile_name.append(file_pathways[i,5])
# Tiles are stored at: [blob root]/v002/[state]/[year]/[state]_[resolution]_[year]/[quadrangle]/filename
tile_url.append(blob_root + '/v002/' + file_pathways[i,1] + '/'+ file_pathways[i,2] + '/' \
+ file_pathways[i,3] +'/'+ file_pathways[i,4] +'/'+ file_pathways[i,5] )
return (tile_name, tile_url)
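# Hedged sketch of the point-to-tile-URL workflow using the two functions above; the
# coordinates are placeholders.
def _example_points_to_tile_urls():
    """Look up candidate NAIP file pathways for lists of longitudes/latitudes with
    lons_lat_to_filepaths, then convert them into tile names and download URLs."""
    index = NAIPTileIndex()
    lons, lats = [-78.94, -77.04], [36.00, 38.91]  # placeholder coordinates
    file_pathways = lons_lat_to_filepaths(lons, lats, index)
    tile_names, tile_urls = filepaths_to_tile_name_tile_url(file_pathways)
    return tile_names, tile_urls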
"""
Function to retrieve file pathways from Group identified ASTs
"""
def collected_quads_to_tile_name_tile_url(quads):
"""
Read in an excel sheet which includes the quadrangle, then build the corresponding tile names and urls
"""
tile_name = []
tile_url = []
file_name_index = {'m': 0, 'qqname': 1, 'direction': 2,'YY': 3, 'resolution': 4,'capture_date': 5,'version_date': 5}
blob_root = 'https://naipblobs.blob.core.windows.net/naip'
two_digit_state_resolution = ["al","ak","az","ar","ca", "co","ct","de","fl","ga",
"hi","id","il","in","ia", "ks","ky","la","me","md",
"ma","mi","mn","ms","mo", "mt","ne","nv","nh","nj",
"nm","ny","nc","nd","oh", "ok","or","pa","ri","sc",
"sd","tn","tx","ut","vt", "va", "wa","wv","wi","wy"]
for i in range(len(quads)):
file_name = quads.iloc[i,3].split('_') #filename
state = quads.iloc[i,6].lower() #state
year = quads.iloc[i,5] # YYYY
if state in two_digit_state_resolution:
resolution = file_name[file_name_index["resolution"]][1:3]+"cm"
else:
resolution = file_name[file_name_index["resolution"]]+"cm"
quadrangle = file_name[file_name_index["qqname"]][0:5] #qqname
tile_name.append(quads.iloc[i,3] +'.tif')
tile_url.append(blob_root + '/v002/' + state + '/' + str(year)+ '/' + state + '_' + resolution \
+ '_' + str(year) + '/' + str(quadrangle) + '/' + tile_name[i])
# Tiles are stored at: [blob root]/v002/[state]/[year]/[state]_[resolution]_[year]/[quadrangle]/filename
return (tile_name, tile_url)
def tile_characeteristics(tile_name_tile_url_eia_hfid_thirty_ports):
"""tabulates the tile characteristics (the states, year resolution ranges),
returns the tile charcateristics
(quadrange names, the filenames,the states, year resolution ranges)
Args:
file_loc (str): The file location of the spreadsheet
print_cols (bool): A flag used to print the columns to the console
(default is False)
Returns:
list: a list of strings representing the header columns
"""
state_array = np.empty((len(tile_name_tile_url_eia_hfid_thirty_ports), 1), dtype = object)
year_array = np.empty((len(tile_name_tile_url_eia_hfid_thirty_ports), 1))
quad_array = np.empty((len(tile_name_tile_url_eia_hfid_thirty_ports), 1))
resolution_array = np.empty((len(tile_name_tile_url_eia_hfid_thirty_ports), 1), dtype = object)
filename_array = np.empty((len(tile_name_tile_url_eia_hfid_thirty_ports), 1), dtype = object)
for i in range(len(tile_name_tile_url_eia_hfid_thirty_ports)):
state_array[i] = tile_name_tile_url_eia_hfid_thirty_ports[i,1].split('/')[5]
year_array[i] = tile_name_tile_url_eia_hfid_thirty_ports[i,1].split('/')[6]
quad_array[i] = tile_name_tile_url_eia_hfid_thirty_ports[i,1].split('/')[8]
filename_array[i] = tile_name_tile_url_eia_hfid_thirty_ports[i,1].split('/')[9]
resolution_array[i] = tile_name_tile_url_eia_hfid_thirty_ports[i,1].split('/')[-3].split('_')[1]
num_states = len(np.unique(state_array))
state_abbreviations = np.unique(state_array)
years = np.unique(year_array)
resolutions = np.unique(resolution_array)
print("the number of tiles includes", len(tile_name_tile_url_eia_hfid_thirty_ports))
print("The number of states included", num_states)
print("Postal abriviations of the states included", state_abbreviations)
print("The years in which the images were collected", years)
print("The resolutions of the images", resolutions)
return num_states, state_abbreviations, years, resolutions, quad_array, filename_array
"""
Tile distribution functions
"""
class annotator:
def __init__(self, sub_directory):
self.sub_directory = sub_directory
def state_dcc_directory(self, dcc_directory):
self.dcc_directory = dcc_directory
def number_of_tiles(self, num_tiles):
self.num_tiles = num_tiles
def get_tile_urls(self, tile_name_tile_url_unlabeled):
"""
self.tile_name_tile_url_unlabeled: npy array of the initial tiles that have not been labeled
self.tile_name_tile_url_tiles_for_annotators: npy array of the tiles to be allocated to the annotator
"""
self.tile_name_tile_url_unlabeled = np.load(tile_name_tile_url_unlabeled) #the tiles that have not yet been labeled to date
print("Unlabeled Tiles",self.tile_name_tile_url_unlabeled.shape)
self.tile_name_tile_url_tiles_for_annotators = self.tile_name_tile_url_unlabeled[range(self.num_tiles),:] #create an array of the tiles that will be allocated to this annotator
self.tile_url = self.tile_name_tile_url_tiles_for_annotators[:,1] #get the urls of the tiles that will allocated to the annotator
def track_tile_annotations(self, tile_name_tile_url_labeled):
"""
self.tile_name_tile_url_remaining: npy array of the remaining tiles to be annotated; this will then be passed in the next iteration
self.tile_name_tile_url_labeled: npy array of the tiles labeled
"""
self.tile_name_tile_url_labeled = np.load(tile_name_tile_url_labeled) #the tiles that have been labeled to date
self.tile_name_tile_url_labeled = np.concatenate((self.tile_name_tile_url_labeled, self.tile_name_tile_url_tiles_for_annotators), axis=0)
print("Labeled Tiles", self.tile_name_tile_url_labeled.shape)
self.tile_name_tile_url_remaining = np.delete(self.tile_name_tile_url_unlabeled, range(self.num_tiles), 0) #the numpy array of the remaining tiles
#(remove the tiles that the annotator is labeling)
print(self.tile_name_tile_url_remaining.shape)
if len(self.tile_name_tile_url_tiles_for_annotators) + len(self.tile_name_tile_url_remaining) != len(self.tile_name_tile_url_unlabeled):
raise Exception("The number of remaining tiles and the tiles allocated to annotaters is less \
than the number of tiles passed through this function")
def make_subdirectories(self):
self.new_dir = self.dcc_directory + "/" + self.sub_directory
os.makedirs(self.new_dir, exist_ok = True)
self.tiles_dir = os.path.join(self.new_dir,'tiles') #directory for the naip data
os.makedirs(self.tiles_dir,exist_ok=True)
self.chips_dir = os.path.join(self.new_dir,'chips') #directory to hold chips that are clipped from naip tiles
os.makedirs(self.chips_dir,exist_ok=True)
self.chips_positive_dir = os.path.join(self.new_dir,'chips_positive') #directory to hold chips with tanks
os.makedirs(self.chips_positive_dir,exist_ok=True)
self.chips_negative_dir = os.path.join(self.new_dir,'chips_negative') #directory to hold chips with tanks
os.makedirs(self.chips_negative_dir,exist_ok=True)
self.chips_xml_dir = os.path.join(self.new_dir,'chips_positive_xml') #directory to hold xml files
os.makedirs(self.chips_xml_dir,exist_ok=True)
#Make directory to store all xml after correction
self.chips_positive_corrected_xml_dir = os.path.join(self.new_dir,"chips_positive_corrected_xml")
os.makedirs(self.chips_positive_corrected_xml_dir, exist_ok = True)
def download_images(self):
destination_of_filenames = [] #use so that we can index over the file names for processing later
for i in range(self.num_tiles):
print(i)
destination_of_filenames.append(download_url(self.tile_url[i], self.tiles_dir,
progress_updater=DownloadProgressBar()))
return destination_of_filenames
def tile_rename(self):
"""Rename all the tiles into the standard format outlined in repo readme
"""
self.tile_names = os.listdir(self.tiles_dir) #get a list of all of the tiles in tiles directory
print(self.tile_names)
for tile_name in self.tile_names:
tile_name_split = tile_name.split('_')
old_tile_path = os.path.join(self.tiles_dir, tile_name)
new_tile_path = os.path.join(self.tiles_dir, tile_name_split[6]+'_'+tile_name_split[7]+'_'+tile_name_split[8]+'_'+tile_name_split[9]+'_'+ \
tile_name_split[10]+'_'+tile_name_split[11]+'_'+tile_name_split[12]+'_'+tile_name_split[13]+'_'+ \
tile_name_split[14]+'_'+tile_name_split[15].split(".")[0]+".tif")
if os.path.isfile(new_tile_path):
print('Bypassing download of already-downloaded file {}'.format(os.path.basename(new_tile_path)))
else:
os.rename(old_tile_path, new_tile_path)
def tile_rename_standard(self):
"""Rename all the tiles into the standard format outlined in repo readme
"""
self.tile_names = os.listdir(self.tiles_dir) #get a list of all of the tiles in tiles directory
print(self.tile_names)
for tile_name in self.tile_names:
tile_name_split = tile_name.split('_')
old_tile_path = os.path.join(self.tiles_dir, tile_name)
new_tile_path = os.path.join(self.tiles_dir, tile_name_split[6]+'_'+tile_name_split[7]+'_'+tile_name_split[8]+'_'+tile_name_split[9]+'_'+ \
tile_name_split[10]+'_'+tile_name_split[11]+'_'+tile_name_split[12]+'_'+tile_name_split[13]+'_'+ \
tile_name_split[14]+'_'+tile_name_split[15].split(".")[0]+".tif")
if os.path.isfile(new_tile_path):
print('Bypassing download of already-downloaded file {}'.format(os.path.basename(new_tile_path)))
else:
os.rename(old_tile_path, new_tile_path)
def chip_tiles(self):
"""Segment tiles into 512 x 512 pixel chips, preserving resolution
"""
print("chip tiles")
self.tile_names = os.listdir(self.tiles_dir) #get a list of all of the tiles in tiles directory
for tile_name in self.tile_names: #index over the tiles in the tiles_dir
file_name, ext = os.path.splitext(tile_name) # File name
print(tile_name)
item_dim = int(512)
count = 1
tile = cv2.imread(os.path.join(self.tiles_dir, tile_name))
tile_height, tile_width, tile_channels = tile.shape #the size of the tile
#divide the tile into 512 by 512 chips (rounding up)
row_index = math.ceil(tile_height/512)
col_index = math.ceil(tile_width/512)
#print(row_index, col_index)
for y in range(0, row_index):
for x in range(0, col_index):
#https://stackoverflow.com/questions/15589517/how-to-crop-an-image-in-opencv-using-python
chip_img = tile[y*item_dim:y*item_dim+item_dim, x*(item_dim):x*(item_dim)+item_dim]
#specify the path to save the image
chip_name_correct_chip_name = file_name + '_' + f"{y:02}" + '_' + f"{x:02}" + '.jpg' #
chips_save_path = os.path.join(self.chips_dir, chip_name_correct_chip_name) # row_col.jpg
#add in black space if the chip is at the edge of the tile
if (chip_img.shape[0] != 512) & (chip_img.shape[1] != 512): #both height and width
#print("Incorrect Width")
chip = np.zeros((512,512,3))
chip[0:chip_img.shape[0], 0:chip_img.shape[1]] = chip_img
chip_img = chip
if chip_img.shape[0] != 512: #Height
#print("Incorrect Height")
black_height = 512 - chip_img.shape[0] #Height
black_width = 512
black_img = np.zeros((black_height,black_width,3), np.uint8)
chip_img = np.concatenate([chip_img, black_img])
if chip_img.shape[1] != 512: #width
#print("Incorrect Width")
black_height = 512
black_width = 512 - chip_img.shape[1] #width
black_img = np.zeros((black_height,black_width,3), np.uint8)
chip_img = np.concatenate([chip_img, black_img],1)
#save image
cv2.imwrite(os.path.join(chips_save_path), chip_img)
#counter for image pathway
count += 1
print(count)
def copy_positive_images(self):
"""seperate out positive chips into specific directory.
"""
# Input .xml files' names
print("it ran")
for annotation in os.listdir(self.chips_xml_dir): #iterate through the annotations
annotation_filename = os.path.splitext(annotation)[0]
for image in os.listdir(self.chips_dir): #iterate through the images
image_filename = os.path.splitext(image)[0]
if image_filename == annotation_filename:
shutil.copy(os.path.join(self.chips_dir, image), self.chips_positive_dir) # copy images with matching .xml files into the chips_positive folder
print("it finished")
def copy_negative_images(self):
"""seperate out negative chips into specific directory.
"""
print("it ran")
for image in os.listdir(self.chips_dir):
shutil.copy(os.path.join(self.chips_dir, image), self.chips_negative_dir) # copy all chips into negative folder
for annotation in os.listdir(self.chips_xml_dir):
annotation_filename = os.path.splitext(annotation)[0]
for image in os.listdir(self.chips_dir):
image_filename = os.path.splitext(image)[0]
if image_filename == annotation_filename:
os.remove(os.path.join(self.chips_negative_dir, image)) #delete positive images according to the .xml files
print("it finished")
def correct_inconsistent_labels_xml(self):
#Define lists of positive images and xml files
self.chips_positive_list = glob(self.chips_positive_dir + '/*.jpg') #os.listdir(img_path)
self.chips_xml_list = os.listdir(self.chips_xml_dir)
#calculate the number of images
number_of_images = len(self.chips_xml_list)
#Create a list of the possible names that each category may take
correctly_formatted_object = ["closed_roof_tank","narrow_closed_roof_tank",
"external_floating_roof_tank","sedimentation_tank",
"water_tower","undefined_object","spherical_tank"]
object_dict = {"closed_roof_tank": "closed_roof_tank",
"closed_roof_tank ": "closed_roof_tank",
"closed roof tank": "closed_roof_tank",
"narrow_closed_roof_tank": "narrow_closed_roof_tank",
"external_floating_roof_tank": "external_floating_roof_tank",
"external floating roof tank": "external_floating_roof_tank",
'external_floating_roof_tank ': "external_floating_roof_tank",
'external_closed_roof_tank': "external_floating_roof_tank",
"water_treatment_tank": "sedimentation_tank",
'water_treatment_tank ': "sedimentation_tank",
"water_treatment_plant": "sedimentation_tank",
"water_treatment_facility": "sedimentation_tank",
"water_tower": "water_tower",
"water_tower ": "water_tower",
'water_towe': "water_tower",
"spherical_tank":"spherical_tank",
'sphere':"spherical_tank",
'spherical tank':"spherical_tank",
"undefined_object": "undefined_object",
"silo": "undefined_object" }
#calculate the number of images
number_of_images = len(self.chips_xml_list)
#"enumerate each image" This chunk is actually just getting the paths for the images and annotations
for i in range(len(self.chips_xml_list)):
xml_file = self.chips_xml_list[i]
# use the parse() function to load and parse an XML file
tree = et.parse(os.path.join(self.chips_xml_dir, xml_file))
root = tree.getroot()
for obj in root.iter('object'):
for name in obj.findall('name'):
if name.text not in correctly_formatted_object:
name.text = object_dict[name.text]
if int(obj.find('difficult').text) == 1:
obj.find('truncated').text = '1'
obj.find('difficult').text = '1'
tree.write(os.path.join(self.chips_positive_corrected_xml_dir, xml_file))
def move_images_annotations_to_complete_dataset(self, complete_dir_path, include_tiles = False, original = True):
"""seperate out all of the positive chips, annotations, and conditionally tiles from one directory into a new folder.
Args:
file_loc (str): The file location of the spreadsheet
include_tiles (bool; default = False): Specifies whether the full tiles should be moved
original include_tiles (bool; default = True): Specifies whether the original annotation in chips positive or the corrected
annotation in chips_positive_xml should be used
Returns:
len(annotations): number of annotations
len(images): number of images
"""
#make a complete dataset
self.complete_dataset_xml_dir = os.path.join(complete_dir_path, "complete_dataset",'chips_positive_xml')
os.makedirs(self.complete_dataset_xml_dir, exist_ok=True) #directory to hold entire dataset annotations
self.complete_dataset_chips_dir = os.path.join(complete_dir_path, "complete_dataset","chips_positive")
os.makedirs(self.complete_dataset_chips_dir, exist_ok=True) #directory to hold images
#Move annotations
if original:
annotations_path = self.chips_xml_dir
annotations = os.listdir(annotations_path)
if not original:
annotations_path = self.chips_positive_corrected_xml_dir
annotations = os.listdir(annotations_path)
for a in annotations:
#copy annotations
shutil.copy(os.path.join(annotations_path, a), self.complete_dataset_xml_dir)
# remove thumbs
fc.remove_thumbs(self.chips_positive_dir)
#Move images
images = os.listdir(self.chips_positive_dir)
for i in images:
#move images
shutil.copy(os.path.join(self.chips_positive_dir, i), self.complete_dataset_chips_dir)
#Move tiles
if include_tiles:
self.complete_dataset_tile_dir = os.path.join(self.dcc_directory,"complete_dataset","tiles")
os.makedirs(self.complete_dataset_tile_dir, exist_ok=True) #directory to hold tiles
tiles = os.listdir(self.tiles_dir)
for t in tiles:
#move images
shutil.copy(os.path.join(self.tiles_dir, t), self.complete_dataset_tile_dir)
#print(len(annotations),len(images))
return len(annotations), len(images)
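# Hedged sketch of the per-annotator workflow implemented by the class above; the
# sub-directory name, DCC path, and .npy tracking file are placeholders.
def _example_annotator_workflow():
    """Allocate ten unlabeled tiles to a hypothetical annotator, create the standard
    sub-directories, then download and chip the allocated tiles."""
    a = annotator("annotator_name")  # placeholder sub-directory name
    a.state_dcc_directory("path/to/dcc_directory")  # placeholder path
    a.number_of_tiles(10)
    a.get_tile_urls("path/to/tile_name_tile_url_unlabeled.npy")  # placeholder tracking file
    a.make_subdirectories()
    a.download_images()
    a.chip_tiles()
    return a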
"""
Find file paths
"""
def list_of_sub_directories(path_to_images):
"""
Define a function to create a list of the directories in the storage space containing images
Find the subdirectories containing images
"""
sub_directories = [] #initialize list
for folder in os.listdir(path_to_images): #identifies the subfolders
d = path_to_images + "/"+ folder #creates the complete path for each subfolder
if os.path.isdir(d):
sub_directories.append(d) #adds the subfolder to the list
return sub_directories
def img_path_anno_path(sub_directories):
"""
Create an array of the paths to the folders containing the images and annotations for each subdirectory.
Only create paths for subdirectories that contain these folders and that are correctly formatted (Qianyu's thesis, etc.)
"""
img_path = []
anno_path = []
for i in range(len(sub_directories)):
if "chips" in os.listdir(sub_directories[i]):
img_path.append(sub_directories[i] + "/" + "chips_positive")
anno_path.append(sub_directories[i] + "/" + "chips_positive_xml")
elif "chips_positive" in os.listdir(sub_directories[i]):
img_path.append(sub_directories[i] + "/" + "chips_positive")
anno_path.append(sub_directories[i] + "/" + "chips_positive_xml")
else:
for ii in range(len(os.listdir(sub_directories[i]))):
img_path.append(sub_directories[i] + "/" + os.listdir(sub_directories[i])[ii] + "/" + "chips_positive")
anno_path.append(sub_directories[i] + "/" + os.listdir(sub_directories[i])[ii] + "/" + "chips_positive_xml")
img_annotation_path = np.empty((1,2)) #form a numpy array
for i in range(len(img_path)):
if os.path.isdir(img_path[i]) == True:
img_annotation_path = np.vstack((img_annotation_path,
[img_path[i], anno_path[i]]))
img_annotation_path = np.delete(img_annotation_path, 0, axis=0) #0 removes empty row
return img_annotation_path
"""
Check and track annotations
"""
def check_for_missing_images_annotations(img_annotation_path):
"""
Check if images are missing annotations
"""
for i in range(len(img_annotation_path)): #index over each folder
path = Path(img_annotation_path[i,0]) #get root path
parent_dir = path.parent.absolute()
img_files = os.listdir(img_annotation_path[i,0]) #pull the files in the img folder
anno_files = os.listdir(img_annotation_path[i,1]) #pull the files in the annotation folder
anno_files_no_ext = []
for annotation in anno_files: #iterate through the annotations
anno_files_no_ext.append(os.path.splitext(annotation)[0])
img_files_no_ext = []
for image in img_files: #iterate through the images
if image.endswith(".jpg"):
img_files_no_ext.append(os.path.splitext(image)[0])
for image in img_files_no_ext:
if image not in anno_files_no_ext:
print(parent_dir)
print(image)
for anno in anno_files_no_ext:
if anno not in img_files_no_ext:
print(parent_dir)
print(anno)
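# Hedged usage sketch; the parent directory below is a placeholder.
def _example_check_annotation_coverage():
    """Build the (image folder, annotation folder) path pairs under a hypothetical parent
    directory and print any chips that are missing annotations (or vice versa)."""
    img_annotation_path = img_path_anno_path(list_of_sub_directories("path/to/annotation_directories"))
    check_for_missing_images_annotations(img_annotation_path)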
"""
Tracking and Verification
"""
def reference_image_annotation_file_with_annotator(img_annotation_path,
tracker_file_path = 'outputs/tile_img_annotation_annotator.npy'):
"""
Track image annotations
"""
if os.path.isfile(tracker_file_path): #check if the tracking file exists
print("Initializing annotation tracking array; add new annotations to tracking array")
tile_img_annotation_annotator = np.load(tracker_file_path) #load existing
else:
print("Create new tracking array")
tile_img_annotation_annotator = np.empty((1,8)) #form a numpy array
for i in range(len(img_annotation_path)): #index over each folder
print(img_annotation_path[i,0])
#img files + image_file_pathways
img_files = [] #pull the files in the img folder
img_file_pathways = [] #pull the files in the img folder
for image in os.listdir(img_annotation_path[i,0]): #iterate through the images
if image.endswith(".jpg"):
img_files.append(image)
img_file_pathways.append(os.path.join(img_annotation_path[i,0]))
#sort so that the paths/file names match
img_file_pathways = sorted(img_file_pathways)
img_files = sorted(img_files)
num_img_files = len(img_files)
#tiles
tiles = [] # create a list of the tile names
for image in img_files: #iterate through the images
tiles.append(image.rsplit("_",1)[0])
#annotation files
anno_files = sorted(os.listdir(img_annotation_path[i,1])) #pull the files in the annotation folder
#annotator
path = Path(img_annotation_path[i,0]).parent.absolute() #get root path of chips positive/chips positive xml folder
annotator = str(path).rsplit('\\')[-2] #get the annotator name from the root path
annotator_list = [annotator] * len(anno_files)
#annotator - verify coverage
annotator_verify_coverage = [""] * num_img_files
#annotator - verify quality
annotator_verify_quality = [""] * num_img_files
#annotator - verify classes
annotator_verify_classes = [""] * num_img_files
tile_img_annotation_annotator = np.vstack((tile_img_annotation_annotator,
np.column_stack([tiles, img_files, img_file_pathways, anno_files,annotator_list,
annotator_verify_coverage, annotator_verify_quality,
annotator_verify_classes]) ))
if not os.path.isfile(tracker_file_path): #if the file does not exist; remove the initializing dummy array
tile_img_annotation_annotator = np.delete(tile_img_annotation_annotator, 0, axis=0) #0 removes empty row
return tile_img_annotation_annotator
def update_path(path, tracker_file_path):
"""
If the verification has not yet been completed, update the image/xml path
"""
img_annotation_path = img_path_anno_path(list_of_sub_directories(path))
#get the correct img files + image_file_pathways
img_files = [] #pull the files in the img folder
img_file_pathways = [] #pull the files in the img folder
for i in range(len(img_annotation_path)): #index over each folder
for image in os.listdir(img_annotation_path[i,0]): #iterate through the images
if image.endswith(".jpg"):
img_file_pathways.append(os.path.join(img_annotation_path[i,0].rsplit("/",1)[0]))
img_files.append(image)
imgs_and_pathways = np.array(list(zip(img_file_pathways, img_files)))
#replace incorrect pathways
tile_img_annotation_annotator = np.load(tracker_file_path)
for i in range(len(tile_img_annotation_annotator)): #i - index for tracker .npy
for ii in np.where(imgs_and_pathways[:,1] == tile_img_annotation_annotator[i,1])[0]: #find the same images, (ii -index for img and pathway array)
if imgs_and_pathways[ii,0] != tile_img_annotation_annotator[i,2]:
tile_img_annotation_annotator[i,2] = imgs_and_pathways[ii,0]
np.save('outputs/tile_img_annotation_annotator.npy', tile_img_annotation_annotator)
column_names = ["tile_name", "chip_name", "chip pathway", "xml annotation",
"annotator - draw","annotator - verify coverage",
"annotator - verify quality", "annotator - verify classes"]
tile_img_annotation_annotator_df = pd.DataFrame(data = tile_img_annotation_annotator,
index = tile_img_annotation_annotator[:,1],
columns = column_names)
tile_img_annotation_annotator_df.to_csv('outputs/tile_img_annotation_annotator_df.csv')
return tile_img_annotation_annotator
def verification_folders(home_directory, folder_name, annotator_allocation, set_number):
"""
Create folder for workers to verify images
Args:
"""
#create verification folder
verification_dir = os.path.join(home_directory,'verification_set'+set_number)
os.makedirs(verification_dir, exist_ok=True)
#pair folder name with annotators
print(folder_name[0])
##create verification subfolder for each group
os.makedirs(os.path.join(verification_dir, "verify_" + folder_name[0]+ "_" + set_number), exist_ok = True) #verification folder for each group
os.makedirs(os.path.join(verification_dir, "verify_" + folder_name[0]+ "_" + set_number, "chips_positive"), exist_ok = True) #image sub folder
os.makedirs(os.path.join(verification_dir, "verify_" + folder_name[0]+ "_" + set_number, "chips_positive_xml"), exist_ok = True) #xml sub folder
folder_annotator_list = [folder_name[0], annotator_allocation]
return(folder_annotator_list, verification_dir)
def seperate_images_for_verification_update_tracking(folder_annotator_list, verification_dir, set_number, tile_img_annotation_annotator):
"""
Move images to verification folder
"""
print("folder",folder_annotator_list[0]) #the current folder
count = 0
for i in range(len(folder_annotator_list[1])): #iterate over annotator
print("annotator",folder_annotator_list[1][i]) #the current annotator
for ii in np.where(tile_img_annotation_annotator[:, 4] == folder_annotator_list[1][i])[0]:
if len(tile_img_annotation_annotator[ii,5]) == 0:
tile_img_annotation_annotator[ii,5] = folder_annotator_list[0].split("_")[0].capitalize()#coverage
tile_img_annotation_annotator[ii,6] = folder_annotator_list[0].split("_")[1].capitalize()#quality
tile_img_annotation_annotator[ii,7] = folder_annotator_list[0].split("_")[2].capitalize()#class
shutil.copy(os.path.join(tile_img_annotation_annotator[ii, 2],"chips_positive", tile_img_annotation_annotator[ii, 1]),
os.path.join(verification_dir, "verify_" + folder_annotator_list[0] + "_" + set_number, "chips_positive")) #copy images
shutil.copy(os.path.join(tile_img_annotation_annotator[ii, 2], "chips_positive_xml",
tile_img_annotation_annotator[ii, 3]),
os.path.join(verification_dir, "verify_" + folder_annotator_list[0] + "_" + set_number, "chips_positive_xml")) #copy annotations
count += 1 #count the files allocated to each
print(count)
return tile_img_annotation_annotator
## Old Functions
def verification_folders_specify_in_function(home_directory, set_number):
"""
Create folder for workers to verify images
"""
verification_dir = os.path.join(home_directory,'verification_set2') #create verification folder
os.makedirs(verification_dir, exist_ok=True)
folder_names = ['josh_james_amadu',
'jaewon_james_josh',
'jaewon_james_amadu',
'josh_jaewon_amadu']
annotator_allocation = [['Jaewon','Jamila','Jonathan','Mia','Faiz','Alex'],
['Amadu','Aidan', 'Sunny'],
['Josh', 'Jackson'],
['James','Qianyu', 'Connor', 'Celine', 'group_unreviewed_unverfied_images']]
folder_annotator_list = []
for i in range(len(folder_names)):
print(folder_names[i])
#create verification subfolder for each group
os.makedirs(os.path.join(verification_dir, "verify_" + folder_names[i]+ "_" + set_number), exist_ok = True) #verification folder for each group
os.makedirs(os.path.join(verification_dir, "verify_" + folder_names[i]+ "_" + set_number, "chips"), exist_ok = True) #image sub folder
os.makedirs(os.path.join(verification_dir, "verify_" + folder_names[i]+ "_" + set_number, "chips_xml"), exist_ok = True) #xml sub folder
folder_annotator_list.append([folder_names[i],annotator_allocation[i]])
return(folder_annotator_list, verification_dir)
def seperate_images_for_verification_update_tracking_specify_in_function(folder_annotator_list, verification_dir, set_number, tile_img_annotation_annotator):
"""
Move images to verification folder
"""
for i in range(len(folder_annotator_list)): #iterate over folders
print("folder",folder_annotator_list[i][0]) #the current folder
count = 0
for ii in range(len(folder_annotator_list[i][1])): #iterate over annotator
#print("annotator",folder_annotator_list[i][1][ii]) #the current annotator
for iii in np.where(tile_img_annotation_annotator[:, 4] == folder_annotator_list[i][1][ii])[0]:
if len(tile_img_annotation_annotator[iii,5]) == 0:
tile_img_annotation_annotator[iii,5] = folder_annotator_list[i][0].split("_")[0].capitalize()#coverage
tile_img_annotation_annotator[iii,6] = folder_annotator_list[i][0].split("_")[1].capitalize()#quality
tile_img_annotation_annotator[iii,7] = folder_annotator_list[i][0].split("_")[2].capitalize()#class
shutil.copy(tile_img_annotation_annotator[iii, 2],
os.path.join(verification_dir, "verify_" + folder_annotator_list[i][0] + "_" + set_number, "chips")) #copy images
shutil.copy(os.path.join(tile_img_annotation_annotator[iii, 2].rsplit("/",1)[0],"chips_positive_xml",
tile_img_annotation_annotator[iii, 3]),
os.path.join(verification_dir, "verify_" + folder_annotator_list[i][0] + "_" + set_number, "chips_xml")) #copy annotations
count += 1 #count the files allocated to each
print(count)
return tile_img_annotation_annotator
"""
Review Characteristics
"""
def summary_of_dataset(img_path, anno_path):
### Define function to count the number of objects in each category
"""Get summary of the whole dataset
Args:
img_path (str): The path of the folder containing original images
anno_path (str): The path of the folder containing original annotation files
Returns:
summary_table (pandas df): A dataframe summary table of the number of objects in each class
unknown_object_name (array): An array of the labels ascribed to objects that are not counted in the other existing categories
number_of_images (int): the number of images in the summary table
"""
#Define lists
print(anno_path)
img_list = glob(img_path + '/*.jpg') #os.listdir(img_path)
anno_list = os.listdir(anno_path)
#calculate the number of images
number_of_images = len(img_list)
#Initial variables to count the number of objects in each category (set to zero)
all_objects_count = 0 #all objects
closed_roof_tank_count = 0 #closed_roof_tank
narrow_closed_roof_tank_count = 0 #narrow_closed_roof_tank
external_floating_roof_tank_count = 0 #external_floating_roof_tank
spherical_tank_count = 0 #spherical_tank
sedimentation_tank_count = 0 #water_treatment_tank
water_tower_count = 0 #water_tower
undefined_object_count = 0 #undefined_object
#Create an list to save unknown object names
unknown_object_name = []
#"enumerate each image" This chunk is actually just getting the paths for the images and annotations
for i in range(len(img_list)):
img_file = img_list[i]
anno_file = anno_list[i]
#read .xml file
dom_tree = xml.dom.minidom.parse(anno_path + "/" + anno_file)
annotation = dom_tree.documentElement
file_name_list = annotation.getElementsByTagName('filename') #[<DOM Element: filename at 0x381f788>]
file_name = file_name_list[0].childNodes[0].data
object_list = annotation.getElementsByTagName('object')
for objects in object_list:
# print objects
all_objects_count += 1
namelist = objects.getElementsByTagName('name')
object_name = namelist[0].childNodes[0].data
if object_name == "closed_roof_tank":
closed_roof_tank_count += 1
elif object_name == "narrow_closed_roof_tank":
narrow_closed_roof_tank_count += 1
elif object_name == "external_floating_roof_tank":
external_floating_roof_tank_count += 1
elif object_name == "spherical_tank":
spherical_tank_count += 1
elif object_name == "sedimentation_tank":
sedimentation_tank_count += 1
elif object_name == "water_tower":
water_tower_count += 1
elif object_name == "undefined_object":
undefined_object_count += 1
else:
unknown_object_name.append(object_name)
summary_table = pd.DataFrame({"categories":["all_objects_count","closed_roof_tank_count", "narrow_closed_roof_tank_count",
"external_floating_roof_tank_count", "spherical_tank_count", "sedimentation_tank_count",
"water_tower_count", "undefined_object"],
"values": [all_objects_count, closed_roof_tank_count, narrow_closed_roof_tank_count, external_floating_roof_tank_count,
spherical_tank_count, sedimentation_tank_count, water_tower_count, undefined_object_count]})
summary_table.set_index('categories', inplace = True)
unknown_object_name = np.unique(unknown_object_name)
return summary_table, unknown_object_name, number_of_images
def dataset_summary_assessment(img_annotation_path, multiple = True):
"""
#### Iterate over the list of paths and create summary tables for each of the folders
"""
summary_table = pd.DataFrame({"categories":["all_objects_count","closed_roof_tank_count", "narrow_closed_roof_tank_count",
"external_floating_roof_tank_count", "spherical_tank_count", "sedimentation_tank_count",
"water_tower_count", "undefined_object"],
"values": [0]*8})
summary_table.set_index('categories', inplace = True)
unknown_object = []
number_of_images = 0
if multiple == True:
for i in range(len(img_annotation_path)):
summary_table_temp, unknown_object_array_temp, number_of_images_temp = summary_of_dataset(img_annotation_path[i,0],
img_annotation_path[i,1])
summary_table = summary_table.add(summary_table_temp)
unknown_object.append(unknown_object_array_temp)
number_of_images += number_of_images_temp
if multiple == False:
summary_table_temp, unknown_object_array_temp, number_of_images_temp = summary_of_dataset(img_annotation_path[0],
img_annotation_path[1])
summary_table = summary_table.add(summary_table_temp)
unknown_object.append(unknown_object_array_temp)
number_of_images += number_of_images_temp
summary_table.to_csv('outputs/summary_table.csv')
print("Array unknown objects", unknown_object)
print("The number of clipped images included in the assessment", number_of_images)
def tile_progress(tiles_completed, tiles_remaining):
print("tiles done")
tiles_completed = np.load(tiles_completed)
print(tiles_completed.shape)
print("tiles left to be done")
tile_remaining = np.load(tiles_remaining)
print(tile_remaining.shape)
print("Percent completed")
print(tiles_completed.shape[0]/(tile_remaining.shape[0] + tiles_completed.shape[0]))
#### Convert VOC to YOLO
def get_classes(class_text_file):
with open(class_text_file, "r") as class_text_file:
lines = class_text_file.readlines()
classes = []
for l in lines:
classes.append(l.replace("\n", ""))
return classes
def get_images_in_dir(dir_path):
image_list = []
for filename in glob(dir_path + '/*.jpg'):
image_list.append(filename)
return image_list
def get_annotations_in_dir(dir_path):
anno_list = []
for filename in glob(dir_path + '/*.txt'):
anno_list.append(filename)
return anno_list
def convert(size, box):
dw = 1./(size[0])
dh = 1./(size[1])
x = (box[0] + box[1])/2.0 - 1
y = (box[2] + box[3])/2.0 - 1
w = box[1] - box[0]
h = box[3] - box[2]
x = x*dw
w = w*dw
y = y*dh
h = h*dh
return (x,y,w,h)
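# Hedged worked example of the VOC-to-YOLO conversion above; the box values are made up.
def _example_voc_to_yolo_conversion():
    """Convert a hypothetical VOC box (xmin=100, xmax=200, ymin=300, ymax=400) on a
    512 x 512 chip with convert(); the result is approximately
    (x, y, w, h) = (0.2910, 0.6816, 0.1953, 0.1953)."""
    return convert((512, 512), (100.0, 200.0, 300.0, 400.0))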
def convert_annotation(dir_path, xml_path, output_path, image_path, classes):
basename = os.path.basename(image_path)
basename_no_ext = os.path.splitext(basename)[0]
in_file = open(xml_path + '/' + basename_no_ext + '.xml')
out_file = open(output_path + '/' + basename_no_ext + '.txt', 'w')
tree = et.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult)==1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
bb = convert((w,h), b)
out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
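# Hedged usage sketch of the VOC-to-YOLO directory conversion; all paths are placeholders.
def _example_convert_annotations_in_directory():
    """Convert every positive chip's VOC .xml annotation in a hypothetical directory layout
    into a YOLO .txt file using get_classes, get_images_in_dir, and convert_annotation above."""
    classes = get_classes("path/to/classes.txt")
    for image_path in get_images_in_dir("path/to/chips_positive"):
        convert_annotation("path/to/chips_positive", "path/to/chips_positive_xml",
                           "path/to/labels", image_path, classes)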
## data augmentation
def convert_yolo_to_cv_format(yolo_annotation, H, W):
f = open(yolo_annotation, 'r')
data = f.readlines()
bboxes = []
for d in data:
c, x, y, w, h = map(float, d.split(' ')) #bounding box, class, x,y, cords, width, height
#l = int((x - w / 2) * W)
#r = int((x + w / 2) * W)
#t = int((y - h / 2) * H)
#b = int((y + h / 2) * H)
l = ((x - w / 2) * W)
r = ((x + w / 2) * W)
t = ((y - h / 2) * H)
b = ((y + h / 2) * H)
if l < 0:
l = 0
if r > W - 1:
r = W - 1
if t < 0:
t = 0
if b > H - 1:
b = H - 1
bboxes.append([l,t,r,b,c])
bboxes = np.array(bboxes)
return(bboxes)
def convert_cv_to_yolo_format(directory, output_filename, bboxes, H, W):
#Convert from opencv format to yolo format
# H,W is the image height and width
bboxes_str = []
for i in range(len(bboxes)):
bbox_W = round(((bboxes[i,2] - bboxes[i,0]) / W), 10)
bbox_H = round(((bboxes[i,3] - bboxes[i,1]) / H), 10)
center_bbox_x = round(((bboxes[i,0] + bboxes[i,2]) / (2 * W)), 10)
center_bbox_y = round(((bboxes[i,1] + bboxes[i,3]) / (2 * H)), 10)
bbox = [int(bboxes[i,4]),
center_bbox_x, center_bbox_y,
bbox_W, bbox_H, " \n"]
bbox = ' '.join(map(str,bbox))
bboxes_str.append(bbox)
print(bboxes_str)
f = open(directory + output_filename +'.txt', 'w')
f.writelines(bboxes_str)
f.close() #to change file access modes
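# Hedged sketch of a YOLO <-> OpenCV-format round trip for augmentation; all paths are placeholders.
def _example_yolo_cv_round_trip():
    """Read a YOLO .txt annotation for a 512 x 512 chip into [l, t, r, b, class] boxes,
    (optionally transform them alongside an augmented image), then write them back to
    YOLO format with convert_cv_to_yolo_format."""
    H, W = 512, 512
    bboxes = convert_yolo_to_cv_format("path/to/labels/chip_00_00.txt", H, W)
    # ...apply the matching geometric augmentation to the image and bboxes here...
    convert_cv_to_yolo_format("path/to/labels_augmented/", "chip_00_00_aug", bboxes, H, W)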
#In progress
def identify_incorrectly_formated_files(img_annotation_path):
for i in range(len(img_annotation_path)): #index over each folder
path = Path(img_annotation_path[i,0])
parent_dir = path.parent.absolute()
img_files = os.listdir(img_annotation_path[i,0]) #pull the files in the img folder
anno_files = os.listdir(img_annotation_path[i,1]) #pull the files in the annotation folder
incorrectly_formatted_files = [] #initialize list to store the names of incorrectly formatted files
for ii in range(len(anno_files)): #index over each anno file
if anno_files[ii].endswith(".xml") != True: #create a list of the incorrectly formatted files
incorrectly_formatted_files.append(anno_files[ii])
print(len(incorrectly_formatted_files))
if len(incorrectly_formatted_files) > 0:
os.mkdir(os.path.join(parent_dir,"chips_incorrect_format"))
os.mkdir(os.path.join(parent_dir, "chips_xml_incorrect_format"))
x = 0
"""
chips_incorrect_format = os.mkdir(path.join(parent_dir,"chips_incorrect_format"))
chips_xml_incorrect_format = os.mkdir(os.path.join(parent_dir, "chips_xml_incorrect_format"))
for anno_file in incorrectly_formatted_files: #get the file names in the annotation folder (without the extension)
anno_filename = os.path.splitext(anno_file)[0]
# copy images having same names with .xml files to "chips_tank" folder
for img_file in img_files:
img_filename = os.path.splitext(img_file)[0]
if anno_filename == img_filename:
x += 1
#shutil.move(os.path.join(img_annotation_path[i,0], img_file), os.path.join(parent_dir, "chips_incorrect_format"))
#shutil.move(os.path.join(img_annotation_path[i,1], anno_file), os.path.join(parent_dir, "chips_xml_incorrect_format"))
"""
print(x)
# Random Functions
def resize_256x256_to_515x512():
#Edit annotations
#Annotation resize
#read original annotations and write new xml files.
"""Resize annotation.
Args:
ImgPath (str): The path of the folder containing original images
Annopath (str): The path of the folder containing original annotation files
ProcessedPath (str): The path of folder to save new annotation files
imagelist (list): a list of original images
image_pre (str): The file name of the image
ext (str): The extension name of the image
imgfile (str): The path of the image
xmlfile (str): The path of the xml file of the image
DomTree, annotation, filenamelist, filename, objectlist are nodes in the xml file
xmins, xmaxs, ymins, ymaxs (int): locations of bounding box of tanks
names (str): label names
num (int): number of labels
filename_fill, filename_jpg (str): image name
dealpath (str): path to save new xml file
imagpath (str): path of the image
Returns:
create new xml files
"""
ImgPath = 'C:/Users/vcm/Desktop/NAIP/512_tank/'
AnnoPath = 'C:/Users/vcm/Desktop/NAIP/512_xml/'
ProcessedPath = 'C:/Users/vcm/Desktop/NAIP/512_xml_rename/'
os.makedirs(ProcessedPath, exist_ok=True)
imagelist = os.listdir(ImgPath)
for image in imagelist:
print('a new image:', image)
image_pre, ext = os.path.splitext(image)
imgfile = ImgPath + image
xmlfile = AnnoPath + image_pre + '.xml'
# Read original xml file content
DomTree = xml.dom.minidom.parse(xmlfile)
annotation = DomTree.documentElement
filenamelist = annotation.getElementsByTagName('filename') #[<DOM Element: filename at 0x381f788>]
filename = filenamelist[0].childNodes[0].data
objectlist = annotation.getElementsByTagName('object')
count = 0
xmins = []
xmaxs = []
ymins = []
ymaxs = []
names = []
for objects in objectlist:
# print objects
count = count + 1
namelist = objects.getElementsByTagName('name')
# print 'namelist:',namelist
# change label name
objectname = namelist[0].childNodes[0].data
if objectname == "closed roof tank" or objectname == "silo":
names.append("closed_roof_tank")
elif objectname == "external floating roof tank":
names.append("external_floating_roof_tank")
elif objectname == "sphere" or objectname == "spherical tank":
names.append("spherical_tank")
elif objectname == "water_treatment_facility" or objectname == "water treatment tank":
names.append("water_treatment_tank")
else:
names.append(objectname)
# write locations of bounding boxes
bndbox = objects.getElementsByTagName('bndbox')
cropboxes = []
for box in bndbox:
try:
x1_list = box.getElementsByTagName('xmin')
x1 = int(x1_list[0].childNodes[0].data)
y1_list = box.getElementsByTagName('ymin')
y1 = int(y1_list[0].childNodes[0].data)
x2_list = box.getElementsByTagName('xmax')
x2 = int(x2_list[0].childNodes[0].data)
y2_list = box.getElementsByTagName('ymax')
y2 = int(y2_list[0].childNodes[0].data)
x1_1 = x1
y1_1 = y1
x2_1 = x2
y2_1 = y2
img = Image.open(imgfile)
width,height = img.size
xmins.append(x1_1)
ymins.append(y1_1)
xmaxs.append(x2_1)
ymaxs.append(y2_1)
except Exception as e:
print(e)
num = count
print(num)
print(names)
filename_fill = image_pre
filename_jpg = filename_fill + ".jpg"
dealpath=ProcessedPath+ filename_fill +".xml"
with open(dealpath, 'w') as f:
height, width = (256, 256)
writexml(dealpath,filename_jpg,num,xmins,ymins,xmaxs,ymaxs,names, height, width)
def positive_images(path): #found in random script, compare the two functions to see if this one is needed
"""Save positive images' names.
Args:
path (string): The path to where positive images are stored.
image_list (list): A list of images' names without extensions
image_name (list): A list of images' names with extensions
Returns:
Create a .npy file in the path where you store this code.
"""
image_list = []
image_name = os.listdir(path)
for image in image_name:
filename=os.path.splitext(image)[0]
image_list.append(filename)
np.save("positive_image_list", image_list)
return
#old functions
def summary_of_dataset_inconsistent_data(img_path, anno_path):
### Define function to count the number of objects in each category
"""Get summary of the whole dataset
Args:
img_path (str): The path of the folder containing original images
anno_path (str): The path of the folder containing original annotation files
Returns:
summary_table (pandas df): A dataframe summary table of the number of objects in each class
        unknown_object_name (array): An array of the labels ascribed to objects that are not counted in the other existing categories
number_of_images (int): the number of images in the summary table
"""
#Define lists
print(img_path)
img_list = glob(img_path + '/*.jpg') #os.listdir(img_path)
anno_list = os.listdir(anno_path)
#calculate the number of images
number_of_images = len(img_list)
#Initial variables to count the number of objects in each category (set to zero)
all_objects_count = 0 #all objects
closed_roof_tank_count = 0 #closed_roof_tank
narrow_closed_roof_tank_count = 0 #narrow_closed_roof_tank
water_tower_count = 0 #water_tower
external_floating_roof_tank_count = 0 #external_floating_roof_tank
spherical_tank_count = 0 #spherical_tank
water_treatment_tank_count = 0 #water_treatment_tank
undefined_object_count = 0 #undefined_object
#Create a list of the possible names that each category may take
    closed_roof_tank_label_list = ["closed_roof_tank", "closed roof tank"]
narrow_closed_roof_tank_label_list = ["narrow_closed_roof_tank"]
external_floating_roof_tank_label_list = ["external_floating_roof_tank", 'external floating roof tank']
spherical_tank_label_list = ["spherical_tank", 'sphere', 'spherical tank']
water_treatment_tank_label_list = ["water_treatment_tank", 'water_treatment_plant', 'water_treatment_facility']
water_tower_label_list = ["water_tower"]
undefined_object_label_list = ["undefined_object", 'silo']
#Create an list to save unknown object names
unknown_object_name = []
#"enumerate each image" This chunk is actually just getting the paths for the images and annotations
for i in range(len(img_list)):
img_file = img_list[i]
#print(img_file)
anno_file = anno_list[i]
#read .xml file
dom_tree = xml.dom.minidom.parse(anno_path + "/" + anno_file)
annotation = dom_tree.documentElement
file_name_list = annotation.getElementsByTagName('filename') #[<DOM Element: filename at 0x381f788>]
file_name = file_name_list[0].childNodes[0].data
object_list = annotation.getElementsByTagName('object')
for objects in object_list:
# print objects
all_objects_count += 1
namelist = objects.getElementsByTagName('name')
object_name = namelist[0].childNodes[0].data
if object_name in closed_roof_tank_label_list:
closed_roof_tank_count += 1
elif object_name in narrow_closed_roof_tank_label_list:
narrow_closed_roof_tank_count += 1
elif object_name in external_floating_roof_tank_label_list:
external_floating_roof_tank_count += 1
elif object_name in spherical_tank_label_list:
spherical_tank_count += 1
elif object_name in water_treatment_tank_label_list:
water_treatment_tank_count += 1
elif object_name in water_tower_label_list:
water_tower_count += 1
elif object_name in undefined_object_label_list:
undefined_object_count += 1
else:
unknown_object_name.append(object_name)
summary_table = pd.DataFrame({"categories":["all_objects_count","closed_roof_tank_count", "narrow_closed_roof_tank_count",
"external_floating_roof_tank_count", "spherical_tank_count", "water_treatment_tank_count",
"water_tower_count", "undefined_object"],
"values": [all_objects_count, closed_roof_tank_count, narrow_closed_roof_tank_count, external_floating_roof_tank_count,
spherical_tank_count, water_treatment_tank_count, water_tower_count, undefined_object_count]})
summary_table.set_index('categories', inplace = True)
unknown_object_name = np.unique(unknown_object_name)
return summary_table, unknown_object_name, number_of_images
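# Example usage sketch (hypothetical paths): count the labeled objects in one img/anno folder pair.
# summary_table, unknown_object_name, number_of_images = summary_of_dataset_inconsistent_data(
#     "set_1/chips_positive", "set_1/chips_positive_xml")
# print(summary_table)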
def img_path_anno_path_inconsistent_data(sub_directories):
"""
### Define a function to create a list of the annotation and positive_chip paths for each of the subdirectories
"Create an array of the paths to the folders containing the images and annotation given a subdirectory"
Only create paths for subdirectories that have these paths and for subdirectories that are correctly formated (Qianyu's thesis, etc.)
"""
img_path = []
anno_path = []
for i in range(len(sub_directories)):
if "chips" in os.listdir(sub_directories[i]):
img_path.append(sub_directories[i] + "/" + "chips_positive")
anno_path.append(sub_directories[i] + "/" + "chips_positive_xml")
else:
for ii in range(len(os.listdir(sub_directories[i]))):
print(sub_directories[i],"/", os.listdir(sub_directories[i])[ii])
img_path.append(sub_directories[i] + "/" + os.listdir(sub_directories[i])[ii] + "/" + "chips_positive")
anno_path.append(sub_directories[i] + "/" + os.listdir(sub_directories[i])[ii] + "/" + "chips_positive_xml")
img_annotation_path = np.empty((1,2)) #form a numpy array
for i in range(len(img_path)):
if os.path.isdir(img_path[i]) == True:
img_annotation_path = np.vstack((img_annotation_path,
[img_path[i], anno_path[i]]))
img_annotation_path = np.delete(img_annotation_path, 0, axis=0) #0 removes empty row
return img_annotation_path | AST-data-eng | /AST_data_eng-0.0.5.tar.gz/AST_data_eng-0.0.5/AST_data_eng/az_proc.py | az_proc.py |
def get_img_xml_paths(img_paths_anno_paths):
img_paths = []
xml_paths = []
for directory in tqdm.tqdm(img_paths_anno_paths):
#get all the image and xml paths in directory of annotated images
remove_thumbs(directory[0])
img_paths += sorted(glob(directory[0] + "/*.jpg", recursive = True))
xml_paths += sorted(glob(directory[1] + "/*.xml", recursive = True))
return(img_paths, xml_paths)
def get_unique_tile_names(paths):
img_names = []
tile_names = []
for path in paths:
img_name = os.path.splitext(os.path.basename(path))[0]
if img_name.count("_") > 9:
tile_name = img_name.split("_",4)[-1].rsplit("_",1)[0] #state-year included in image name
else:
tile_name = img_name.rsplit("_",2)[0] #tile name formated image name
img_names.append(img_name)
tile_names.append(tile_name)
tile_names = sorted(np.unique(tile_names))
img_names = sorted(np.unique(img_names))
return(img_names, tile_names)
def get_tile_names(paths):
tile_names = []
img_names = []
for path in paths:
img_name = os.path.splitext(os.path.basename(path))[0]
if img_name.count("_") > 9:
tile_name = img_name.split("_",4)[-1].rsplit("_",1)[0] #state-year included in image name
else:
tile_name = img_name.rsplit("_",2)[0] #tile name formated image name
img_names.append(img_name)
tile_names.append(tile_name)
tile_names = sorted(tile_names)
img_names = sorted(img_names)
return(tile_names, img_names)
def make_by_tile_dirs(home_dir, tile_name):
#create folder to store corrected chips/xmls
tile_dir = os.path.join(home_dir, tile_name) #sub folder for each tile
chips_positive_path = os.path.join(tile_dir,"chips_positive") #images path
chips_positive_xml_path = os.path.join(tile_dir,"chips_positive_xml") #xmls paths
os.makedirs(tile_dir, exist_ok=True)
os.makedirs(chips_positive_path, exist_ok=True)
os.makedirs(chips_positive_xml_path, exist_ok=True)
return(tile_dir)
def read_tile(tile_path, item_dim = int(512)):
tile = cv2.imread(tile_path, cv2.IMREAD_UNCHANGED)
tile_height, tile_width, tile_channels = tile.shape #the size of the tile
row_index = math.ceil(tile_height/item_dim) #y
col_index = math.ceil(tile_width/item_dim) #x
return(tile, row_index, col_index)
def copy_and_replace_images_xml(img_name, img_path, xml_path, copy_dir):
    #copy the jpg/xml pair into copy_dir using the standard chips_positive / chips_positive_xml layout
new_img_path = os.path.join(copy_dir, "chips_positive", img_name + ".jpg")
shutil.copy(img_path, new_img_path)
new_xml_path = os.path.join(copy_dir, "chips_positive_xml", img_name + ".xml")
shutil.copy(xml_path, new_xml_path) #destination
def move_and_replace_images_xml(img_name, img_path, xml_path, copy_dir):
    #move the jpg/xml pair into copy_dir using the standard chips_positive / chips_positive_xml layout
new_img_path = os.path.join(copy_dir, "chips_positive", img_name + ".jpg")
shutil.move(img_path, new_img_path)
new_xml_path = os.path.join(copy_dir, "chips_positive_xml", img_name + ".xml")
shutil.move(xml_path, new_xml_path) #destination
def compare_images(t_2_chip, labeled_img):
gray_t_2_chip = cv2.cvtColor(t_2_chip.astype(np.uint8), cv2.COLOR_BGR2GRAY) # make gray
gray_labeled_image = cv2.cvtColor(labeled_img.astype(np.uint8), cv2.COLOR_BGR2GRAY) #image that has been chipped from tile
    score = compare_ssim(gray_t_2_chip, gray_labeled_image, win_size = 3) #set window size so that it works on the edge pieces
if score >= 0.95: #If the labeled image is correct
#chip_name_incorrectly_chip_names[index]
return(True)
else: #if it is incorrect
## move incorrectly named image if it one of the same name has not already been moved
return(False)
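# Example usage sketch (hypothetical paths and indices, not part of the pipeline): compare a chip cut
# directly from a tile against a previously labeled chip; compare_images() treats an SSIM score >= 0.95
# of the two grayscale arrays as a match.
# tile, row_index, col_index = read_tile("path/to/tiles/m_example_tile.tif")
# t_2_chip = tile_to_chip_array(tile, 3, 7, int(512)) #x = col 3, y = row 7
# labeled_img = cv2.imread("path/to/chips_positive/m_example_tile_07_03.jpg")
# print(compare_images(t_2_chip, labeled_img))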
def compare_move_imgs_standard(t_2_chip, x, y, tile_name, img_count, img_in_tile_paths, xml_in_tile_paths, img_in_tile_names,
compile_tile_dir, incorrect_dir):
img_name_wo_ext = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" # row_col
standard_img_name_wo_ext = [string for string in img_in_tile_names if img_name_wo_ext in string]
standard_img_name_wo_ext = list(set(standard_img_name_wo_ext))
standard_index, = np.where(np.isin(np.array(img_in_tile_names), standard_img_name_wo_ext))
if len(standard_index) >= 1:
for index in standard_index:
img_path = img_in_tile_paths[index]
xml_path = xml_in_tile_paths[index]
img_name = img_in_tile_names[index]
if compare_images(t_2_chip, cv2.imread(img_path)):
img_count += 1
copy_and_replace_images_xml(img_name, img_path, xml_path, compile_tile_dir) #use standard name and copy to compiled directory
#else:
# print(img_name,"\n",img_path)
# copy_and_replace_images_xml(img_name, img_path, xml_path, incorrect_dir) #move to incorrect directory
return(img_count)
#counter for image pathway
def compare_move_imgs_state_year(t_2_chip, x, y, tile_name, count, img_count,
img_in_tile_paths, xml_in_tile_paths, img_in_tile_names,
compile_tile_dir, incorrect_dir):
standard_quad_img_name_wo_ext = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" # row_col
img_name_wo_ext = tile_name + '_'+ str(count).zfill(6) #specify the chip names
state_year_img_name_wo_ext = [string for string in img_in_tile_names if img_name_wo_ext in string]
state_year_img_name_wo_ext = list(set(state_year_img_name_wo_ext))
state_year_index, = np.where(np.isin(np.array(img_in_tile_names), state_year_img_name_wo_ext))
if len(state_year_index) >= 1:
for index in state_year_index:
img_path = img_in_tile_paths[index]
xml_path = xml_in_tile_paths[index]
img_name = img_in_tile_names[index]
if compare_images(t_2_chip, cv2.imread(img_path)):
img_count += 1
copy_and_replace_images_xml(standard_quad_img_name_wo_ext, img_path, xml_path, compile_tile_dir) #use standard name and copy to compiled directory
#else:
# print(img_name, "\n",standard_quad_img_name_wo_ext, img_path)
# copy_and_replace_images_xml(img_name, img_path, xml_path, incorrect_dir) #move to incorrect directory
return(img_count)
#counter for image pathway
def iterate_over_tile_compare_move_state_year_by_image_name(tile_name, compiled_by_tile_dir, tile_dir_path,
images_do_not_match_names_dir, correctly_chipped_incorrect_dir,
img_paths, xml_paths, img_names, img_count_state_year, img_count_standard):
compile_tile_dir = make_by_tile_dirs(compiled_by_tile_dir, tile_name)
tile, row_index, col_index = read_tile(os.path.join(tile_dir_path, tile_name + ".tif")) #read in tile
img_in_tile_paths = [string for string in img_paths if tile_name in string]
xml_in_tile_paths = [string for string in xml_paths if tile_name in string]
img_in_tile_names = [string for string in img_names if tile_name in string]
assert len(img_in_tile_paths) == len(xml_in_tile_paths) == len(img_in_tile_names), "The same number of images and xmls"
count = 1
for y in range(0, row_index): #rows #use row_index to account for the previous errors in state/year naming conventions
for x in range(0, row_index): #cols
standard_quad_img_name_wo_ext = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" # row_col
state_year_img_name_wo_ext = tile_name + '_'+ str(count).zfill(6) #specify the chip names
t_2_chip = tile_to_chip_array(tile, x, y, int(512)) #get correct chip from tile
img_count_state_year = compare_move_imgs_state_year(t_2_chip, x, y, tile_name, count, img_count_state_year,
img_in_tile_paths, xml_in_tile_paths, img_in_tile_names,
compile_tile_dir, images_do_not_match_names_dir)
img_count_standard = compare_move_imgs_standard(t_2_chip, x, y, tile_name, img_count_standard,
img_in_tile_paths, xml_in_tile_paths, img_in_tile_names,
compile_tile_dir, correctly_chipped_incorrect_dir)
count += 1
return(img_count_state_year, img_count_standard)
def get_six_digit_index_from_img_path(state_year_img_paths):
six_digit_index = []
for img_path in state_year_img_paths:
img_name = os.path.splitext(os.path.basename(img_path))[0]
assert img_name.count("_") > 9, "Not state year format"
six_digit_index.append(img_name.rsplit("_",1)[-1])
return(six_digit_index)
def get_x_y_index(standard_img_paths):
xs = []
ys = []
for img_path in standard_img_paths:
img_name = os.path.splitext(os.path.basename(img_path))[0]
assert (img_name.count("_") < 9) and (img_name.split("_",1)[0] == "m"), "Not standard format"
y, x = img_name.split("_")[-2:] #y=row;x=col
ys.append(y)
xs.append(x)
return(ys,xs)
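# Example sketch of the two chip-naming conventions these parsers expect (file names below are
# hypothetical): state/year-formatted names carry a four-token prefix and end in a six-digit index,
# while standard names start with the quad name and end in zero-padded row/col indices.
# get_six_digit_index_from_img_path(["dir/ny_060cm_2017_20170902_m_3807537_ne_18_1_20170611_000023.jpg"]) # -> ["000023"]
# get_x_y_index(["dir/m_3807537_ne_18_1_20170611_07_03.jpg"]) # -> (["07"], ["03"])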
def compare_move_imgs_state_year_by_six_digit_index(tile, x, y, tile_name, count, img_count, idxs, img_paths, xml_paths,
                                                    compile_tile_dir): #tile: the tile array returned by read_tile()
t_2_chip = tile_to_chip_array(tile, x, y, int(512)) #get correct chip from tile
#get standard and state_year img_names
standard_quad_img_name_wo_ext = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" # row_col
state_year_img_name_wo_ext = tile_name + '_'+ str(count).zfill(6) #specify the chip names
#identify img/xml that have been moved
#img_paths_copy = copy.copy(img_paths)
#xml_paths_copy = copy.copy(xml_paths)
assert len(img_paths) == len(xml_paths), "The same number of images and xmls"
#identify imgs/xmls that match the chip position
for idx in idxs:
img_path = img_paths[idx]
xml_path = xml_paths[idx]
if compare_images(t_2_chip, cv2.imread(img_path)):
img_count += 1
copy_and_replace_images_xml(standard_quad_img_name_wo_ext, img_path, xml_path, compile_tile_dir) #use standard name and copy to compiled directory
#remove img/xmls that have been moved from list
#img_paths_copy.remove(img_path)
#xml_paths_copy.remove(xml_path)
#else:
# copy_and_replace_images_xml(state_year_img_name_wo_ext, img_path, xml_path, incorrect_dir) #move to incorrect directory
    return(img_count, img_paths, xml_paths) #return the count plus the img/xml path lists expected by the callers
#counter for image pathway
def iterate_over_tile_compare_move_state_year_by_six_digit_index(all_tile_names, compile_by_tile_state_year_dir, tile_dir_path,
state_year_img_paths, state_year_xml_paths, six_digit_index_list,
images_do_not_match_names_dir):
img_count_state_year = 0
for tile_name in tqdm.tqdm(all_tile_names):
compile_tile_dir = make_by_tile_dirs(compile_by_tile_state_year_dir, tile_name)
tile, row_index, col_index = read_tile(os.path.join(tile_dir_path, tile_name + ".tif")) #read in tile
count = 1
for y in range(0, row_index): #rows #use row_index to account for the previous errors in state/year naming conventions
for x in range(0, row_index): #cols
                #get imgs/xmls where the six-digit index matches the current chip count
                six_digit_index = str(count).zfill(6)
                indicies, = np.where(np.array(six_digit_index_list) == six_digit_index)
                if len(state_year_img_paths) > 0:
                    img_count_state_year, state_year_img_paths, state_year_xml_paths = compare_move_imgs_state_year_by_six_digit_index(tile, x, y, tile_name, count, img_count_state_year, indicies,
                                                                                                                                        state_year_img_paths, state_year_xml_paths,
                                                                                                                                        compile_tile_dir)
count += 1
print(len(state_year_img_paths), len(state_year_xml_paths))
print(img_count_state_year)
def multi_iterate_over_tile_compare_move_state_year_by_six_digit_index(tile_name, compile_by_tile_state_year_dir, tile_dir_path,
state_year_img_paths, state_year_xml_paths, six_digit_index_list):
print(tile_name)
img_count_state_year = 0
compile_tile_dir = make_by_tile_dirs(compile_by_tile_state_year_dir, tile_name)
tile, row_index, col_index = read_tile(os.path.join(tile_dir_path, tile_name + ".tif")) #read in tile
count = 1
for y in range(0, row_index): #rows #use row_index to account for the previous errors in state/year naming conventions
for x in range(0, row_index): #cols
            #get imgs/xmls where the six-digit index matches the current chip count
            six_digit_index = str(count).zfill(6)
            indicies, = np.where(np.array(six_digit_index_list) == six_digit_index)
            if len(state_year_img_paths) > 0:
                img_count_state_year, state_year_img_paths, state_year_xml_paths = compare_move_imgs_state_year_by_six_digit_index(tile, x, y, tile_name, count, img_count_state_year, indicies,
                                                                                                                                    state_year_img_paths, state_year_xml_paths,
                                                                                                                                    compile_tile_dir)
count += 1
print(len(state_year_img_paths), len(state_year_xml_paths))
print(img_count_state_year)
def make_tile_dir_and_get_correct_imgs(tile_name, compile_dir_path, tile_dir_path, correct_chip_dir_path):
compile_tile_dir = make_by_tile_dirs(compile_dir_path, tile_name) #make directory to store positive chips and xmls
tile, row_index, col_index = read_tile(os.path.join(tile_dir_path, tile_name + ".tif")) #read in tile
count = 1
for y in range(0, row_index): #rows #use row_index to account for the previous errors in state/year naming conventions
for x in range(0, row_index): #cols
t_2_chip = tile_to_chip_array(tile, x, y, int(512)) #get correct chip from tile
six_digit_idx = str(count).zfill(6)
cv2.imwrite(os.path.join(correct_chip_dir_path, tile_name + "-" + f"{y:02}" + "-" + f"{x:02}" + "-" + six_digit_idx+".jpg"), t_2_chip) #save images
count += 1
def make_tile_dir_and_get_correct_imgs_w_and_wo_black_sq(tile_name, compile_dir_path, tile_dir_path,
correct_chip_w_black_sq_dir_path,correct_chip_wo_black_sq_dir_path):
compile_tile_dir = make_by_tile_dirs(compile_dir_path, tile_name) #make directory to store positive chips and xmls
tile, row_index, col_index = read_tile(os.path.join(tile_dir_path, tile_name + ".tif")) #read in tile
item_dim = (int(512))
count = 1
for y in range(0, row_index): #rows #use row_index to account for the previous errors in state/year naming conventions
for x in range(0, row_index): #cols
#define image name
six_digit_idx = str(count).zfill(6)
t_2_chip_wo_black_sq_img_name=tile_name + "-" + f"{y:02}" + "-" + f"{x:02}" + "-" + six_digit_idx + ".jpg" #for compare analysis
standard_quad_img_name_wo_ext=tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" + ".jpg" # row_col #for save
#save images without black pixels added
t_2_chip_wo_black_sq = tile[y*item_dim:y*item_dim+item_dim, x*(item_dim):x*(item_dim)+item_dim]
if t_2_chip_wo_black_sq.size != 0:
#write image without black pixels added
cv2.imwrite(os.path.join(correct_chip_wo_black_sq_dir_path, t_2_chip_wo_black_sq_img_name), t_2_chip_wo_black_sq)
#write and save black pixels added
t_2_chip_w_black_sq = tile_to_chip_array(tile, x, y, int(512)) #get correct chip from tile
cv2.imwrite(os.path.join(correct_chip_w_black_sq_dir_path, standard_quad_img_name_wo_ext), t_2_chip_w_black_sq) #write images
count += 1
def compare_imgs_xmls_x_y_index_dcc(correct_img_path, state_year_six_digit_idx_list, state_year_img_paths, state_year_xml_paths, compile_dir):
#change to moving for dcc
#correct_img_path.rsplit("-",3) # tile name formated image name
correct_img_name = os.path.splitext(os.path.basename(correct_img_path))[0]
tile_name, y, x, six_digit_idx = correct_img_name.rsplit("-",3)
y = int(y)
x = int(x)
    #find all state/year images whose six-digit index matches this chip
idxs, = np.where(np.array(state_year_six_digit_idx_list) == six_digit_idx)
tile_dir = os.path.join(compile_dir, tile_name) #sub folder for correct directory
if len(state_year_img_paths) > 0:
#get standard and state_year img_names
standard_quad_img_name_wo_ext = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" # row_col
#identify img/xml that have been moved
#img_paths_copy = copy.copy(img_paths)
#xml_paths_copy = copy.copy(xml_paths)
assert len(state_year_img_paths) == len(state_year_xml_paths), "The same number of images and xmls"
#identify imgs/xmls that match the chip position
for idx in idxs:
img_path = state_year_img_paths[idx]
xml_path = state_year_xml_paths[idx]
if os.path.exists(img_path) and os.path.exists(xml_path): #confirm image and xml is still there
if compare_images(cv2.imread(correct_img_path), cv2.imread(img_path)):
#move_and_replace_images_xml(standard_quad_img_name_wo_ext, img_path, xml_path, tile_dir) #use standard name and copy to compiled directory
copy_and_replace_images_xml(standard_quad_img_name_wo_ext, img_path, xml_path, tile_dir) #use standard name and copy to compiled directory
#remove img/xmls that have been moved from list
#img_paths_copy.remove(img_path)
#xml_paths_copy.remove(xml_path)
#return(img_paths_copy, xml_paths_copy)
def compare_imgs_wo_blk_pxls_state_yr_std_from_6_digit_xy_idxs(correct_img_wo_black_sq, correct_img_wo_black_sq_path,
compile_dir, state_year_six_digit_idx_list,
state_year_img_paths, state_year_xml_paths,
yx_list, standard_img_paths, standard_xml_paths):
#process correct img (wo black sq) info
correct_img_name = os.path.splitext(os.path.basename(correct_img_wo_black_sq_path))[0] #get correct img name
row_dim = correct_img_wo_black_sq.shape[0] #get row dim
col_dim = correct_img_wo_black_sq.shape[1] #get col dim
if min(row_dim, col_dim) >= 3:#compare function has a minimum window set to 3 pixels
tile_name, y, x, six_digit_idx = correct_img_name.rsplit("-",3) #identify tile name and indicies from correct img name
by_tile_dir = os.path.join(compile_dir, tile_name) #sub folder for correct directory
#get standard and state idxs that match the correct img
state_idxs, = np.where(np.array(state_year_six_digit_idx_list) == six_digit_idx)
standard_idxs, = np.where((yx_list == (y, x)).all(axis=1))
#turn the y/x into integers
y = int(y)
x = int(x)
standard_quad_img_name_wo_ext = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" # (row_col) get standard and state_year img_names
#identify imgs/xmls that match the chip position (state imgs)
for idx in state_idxs:
#get verified img/xml path
img_path = state_year_img_paths[idx]
xml_path = state_year_xml_paths[idx]
img = cv2.imread(img_path)
img = img[0:row_dim, 0:col_dim]
if (np.sum(img) != 0) & (compare_images(correct_img_wo_black_sq, img)): #only move images if they are not all black and they match the correct image
copy_and_replace_images_xml(standard_quad_img_name_wo_ext, img_path, xml_path, by_tile_dir) #use standard name and copy to compiled directory
#identify imgs/xmls that match the chip position (standard imgs)
for idx in standard_idxs:
img_path = standard_img_paths[idx]
xml_path = standard_xml_paths[idx]
img = cv2.imread(img_path)
img = img[0:row_dim, 0:col_dim]
if (np.sum(img) != 0) & (compare_images(correct_img_wo_black_sq, img)):
#print("match", correct_img_path, img_path)
copy_and_replace_images_xml(standard_quad_img_name_wo_ext, img_path, xml_path, by_tile_dir) #use standard name and copy to compiled directory
def rename_x_y_index_named_chips(compile_by_tile_dir, tile_names):
#change to moving for dcc
#correct_img_path.rsplit("-",3) # tile name formated image name
for tile in tqdm.tqdm(tile_names):
chip_by_tile_path = os.path.join(compile_by_tile_dir, tile, "chips")
chip_paths = sorted(glob(chip_by_tile_path + "/*.jpg", recursive = True))
for chip_path in chip_paths:
chip_name = os.path.splitext(os.path.basename(chip_path))[0]
if chip_name.count("-") > 0:
tile_name, y, x, six_digit_idx = chip_name.rsplit("-",3)
y = int(y)
x = int(x)
standard_quad_chip_path = os.path.join(chip_by_tile_path,
tile_name + '_' + f"{y:02}" + '_' + f"{x:02}"+".jpg") # row_col
os.rename(chip_path, standard_quad_chip_path)
def rename_x_y_index_named_chips_by_tile(compile_by_tile_dir, tile_name):
#change to moving for dcc
#correct_img_path.rsplit("-",3) # tile name formated image name
chip_by_tile_path = os.path.join(compile_by_tile_dir, tile_name, "chips")
chip_paths = sorted(glob(chip_by_tile_path + "/*.jpg", recursive = True))
for chip_path in chip_paths:
chip_name = os.path.splitext(os.path.basename(chip_path))[0]
if chip_name.count("-") > 0:
tile_name_split, y, x, six_digit_idx = chip_name.rsplit("-",3)
y = int(y)
x = int(x)
standard_quad_chip_path = os.path.join(chip_by_tile_path,
tile_name_split + '_' + f"{y:02}" + '_' + f"{x:02}"+".jpg") # row_col
os.rename(chip_path, standard_quad_chip_path)
####################################################################################################################
########## Identify images where the contents and naming conventions do not match ##################################
#####################################################################################################################
def standard_name_verified_images_to_img_anno_by_tile_dir(verified_set_paths, img_anno_directory, incorrect_named_correctly_chipped_dir, tile_dir_path):
"""
After annotations (with standard quad naming convention) have been verified move images to directory organized by tile_name,
if the image contents are correct (row/col chip in tile).
Structure:
image_anno_dir
tile_name
chips_positive
chips_postiive_xml
Args:
verified_set_paths(str): path to folder containing verified sets
img_anno_directory(str): path to folder containing img and annotations for each tile by each folder (correct naming convention)
incorrect_named_correctly_chipped_dir(str): path to folder that will contain all of the incorrectly named images (correctly chipped)
tiles_dir(str): path to folder containing tiles
"""
    img_name_correct = [] #track the names of verified images whose contents match the tile chip
    for directory in tqdm.tqdm(verified_set_paths):
#get all the image and xml paths in directory of annotated images
remove_thumbs(directory[0])
#sort so that img/xml paths allign
labeled_img_paths = sorted(glob(directory[0] + "/*.jpg", recursive = True))
labeled_xml_paths = sorted(glob(directory[1] + "/*.xml", recursive = True))
#identify tiles in each folder
tiles = []
for img_path in labeled_img_paths:
img_name = os.path.splitext(os.path.basename(img_path))[0]
if img_name.count("_") > 9:
tile_name = img_name.split("_",4)[-1].rsplit("_",1)[0] #state included in image name
else:
tile_name = img_name.rsplit("_",2)[0] #tile name formated image name
tiles.append(tile_name)
tiles = np.unique(tiles)
#identify the images/xmls that correspond with each tile in folder
for tile_name in tiles:
labeled_img_paths_by_tile = [string for string in labeled_img_paths if tile_name in string]
labeled_xml_paths_by_tile = [string for string in labeled_xml_paths if tile_name in string]
assert len(labeled_img_paths_by_tile) == len(labeled_xml_paths_by_tile), "The same number of images and xmls"
            #identify the path to the correct image
#create folder to store corrected chips/xmls
img_anno_for_tile_path = os.path.join(img_anno_directory, tile_name) #sub folder for each tile
imgs_positive_dir_path = os.path.join(img_anno_for_tile_path, "chips_positive") #images path
imgs_positive_dir = os.makedirs(imgs_positive_dir_path, exist_ok = True)
imgs_positive_xml_dir_path = os.path.join(img_anno_for_tile_path, "chips_positive_xml") #xmls paths
imgs_positive_xml_dir = os.makedirs(imgs_positive_xml_dir_path, exist_ok=True)
#create folder to store incorrected chips/xmls
incorrect_imgs_positive_dir_path = os.path.join(incorrect_named_correctly_chipped_dir, tile_name, "chips_positive")
incorrect_imgs_positive_dir = os.makedirs(incorrect_imgs_positive_dir_path, exist_ok=True)
incorrect_imgs_positive_xml_dir_path = os.path.join(incorrect_named_correctly_chipped_dir, tile_name, "chips_positive_xml")
incorrect_imgs_positive_xml_dir = os.makedirs(incorrect_imgs_positive_xml_dir_path, exist_ok=True)
#read in tile
tile_path = os.path.join(tile_dir_path, tile_name + ".tif")
tile = cv2.imread(tile_path, cv2.IMREAD_UNCHANGED)
for i, (labeled_img_path, labeled_xml_path) in enumerate(zip(labeled_img_paths_by_tile, labeled_xml_paths_by_tile)):
# labeled image
img_name = os.path.splitext(os.path.basename(labeled_img_path))[0] #get image name
gray_labeled_img = cv2.cvtColor(cv2.imread(labeled_img_path), cv2.COLOR_BGR2GRAY) #load labeled image
# image from tile
y, x = img_name.split("_")[-2:] #name of tif with the extension removed; y=row;x=col
t_2_chip = tile_to_chip_array(tile, int(x), int(y), int(512)) # load tile to chip
gray_t_2_chip = cv2.cvtColor(t_2_chip.astype(np.uint8), cv2.COLOR_BGR2GRAY) # make gray
# check if images are the same
(score, diff) = compare_ssim(gray_labeled_img, gray_t_2_chip, full=True)
if score >= 0.95: #If the labeled image is correct; copy from verfied folder to img/anno tile folder
img_name_correct.append(img_name)
## move correct postive chip
shutil.copy(labeled_img_path, os.path.join(imgs_positive_dir_path, img_name + ".jpg")) #source, destination
shutil.copy(labeled_xml_path, os.path.join(imgs_positive_xml_dir_path, img_name + ".xml"))
else: #if it is incorrect
## move incorrectly named image if it one of the same name has not already been moved
if not os.path.exists(os.path.join(incorrect_imgs_positive_dir_path, img_name + ".jpg")):
shutil.copy(labeled_img_path, os.path.join(incorrect_imgs_positive_dir_path, img_name + ".jpg"))
else:
print("already exists")
## move incorrectly named xml if it one of the same name has not already been moved
if not os.path.exists(os.path.join(incorrect_imgs_positive_xml_dir_path, img_name + ".xml")):
shutil.copy(labeled_xml_path, os.path.join(incorrect_imgs_positive_xml_dir_path, img_name + ".xml")) #destination
else:
print("already exists")
def relocate_incorrect_image_content_and_naming(img_anno_directory, incorrect_named_correctly_chipped_dir, tile_dir):
"""
Identify images where the name and content are not correctly alligned (the tile_name and row/col do not match the content);
then rellocate to a new folder
Args:
img_anno_directory(str): path to folder containing img and annotations for each tile by each folder (correct naming convention)
tiles_dir(str): path to folder containing tiles
incorrect_named_correctly_chipped_dir(str): path to folder that will contain all of the incorrectly named images (correctly chippeD)
"""
tile_names = sorted(os.listdir(img_anno_directory))
for tile_name in tqdm.tqdm(tile_names):
# get paths to all positive images/mls
img_path = os.path.join(img_anno_directory, tile_name, "chips_positive")
xml_path = os.path.join(img_anno_directory, tile_name, "chips_positive_xml")
remove_thumbs(img_path)
# get all the image and xml paths in directory of annotated images
img_paths = sorted(glob(img_path + "/*.jpg", recursive = True))
xml_paths = sorted(glob(xml_path + "/*.xml", recursive = True))
## get path to directory containing chipped images
tile_to_chips_path = os.path.join(img_anno_directory, tile_name, "chips")
#read in tile
tile_path = os.path.join(tile_dir, tile_name + ".tif")
tile = cv2.imread(tile_path, cv2.IMREAD_UNCHANGED)
#get the image names
img_name_wo_ext = []
for i, (img_path, xml_path) in enumerate(zip(img_paths, xml_paths)):
#get image name
img_name = os.path.splitext(os.path.basename(img_path))[0]
img_name_wo_ext.append(img_name)
## load labeled image
gray_labeled_img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2GRAY) #image that had been labeled
## load correct image from tile
y, x = img_name.split("_")[-2:] #name of tif with the extension removed; y=row;x=col
t_2_chip = tile_to_chip_array(tile, int(x), int(y), int(512)) # load tile to chip
gray_t_2_chip = cv2.cvtColor(t_2_chip.astype(np.uint8), cv2.COLOR_BGR2GRAY) # make gray
## check if images are the same
(score, diff) = compare_ssim(gray_labeled_img, gray_t_2_chip, full=True)
if score < 0.95: #If the labeled image is inccorrect
# create and specify new directory path for chips
chips_dir_path = os.path.join(incorrect_named_correctly_chipped_dir, tile_name, "chips")
chips_dir = os.makedirs(chips_dir_path, exist_ok=True)
## move correct chip
t_2_chip_4_labeled_img_path = os.path.join(tile_to_chips_path, img_name + ".jpg")
shutil.copy(t_2_chip_4_labeled_img_path, os.path.join(chips_dir_path, img_name + ".jpg"))
# create and specify new directory paths for positive chips
chips_positive_dir_path = os.path.join(incorrect_named_correctly_chipped_dir, tile_name, "chips_positive")
chips_positive_dir = os.makedirs(chips_positive_dir_path, exist_ok=True)
## move incorrectly named image if it one of the same name has not already been moved
if not os.path.exists(os.path.join(chips_positive_dir_path, img_name + ".jpg")):
shutil.move(img_path, os.path.join(chips_positive_dir_path, img_name + ".jpg"))
else:
print("already exists")
#create and specify new directory paths for annotations
chips_positive_xml_dir_path = os.path.join(incorrect_named_correctly_chipped_dir, tile_name, "chips_positive_xml")
chips_positive_xml_dir = os.makedirs(chips_positive_xml_dir_path, exist_ok=True)
## move annotation corresponding to incorrectly named image if it one of the same name has not already been moved
if not os.path.exists(os.path.join(chips_positive_xml_dir_path, img_name + ".xml")):
shutil.move(xml_path, os.path.join(chips_positive_xml_dir_path, img_name + ".xml"))
else:
print("already exists")
def identify_correct_name_incorrectly_named_chipped_images(img_anno_directory, incorrect_named_correctly_chipped_dir, tile_dir):
"""
Where images are named incorrectly, but have the correct naming convention;
identify if the image corresponds with another image within a tile and move the image and annotation
Args:
img_anno_directory(str): path to folder containing img and annotations for each tile by each folder (correct naming convention)
tiles_dir(str): path to folder containing tiles
incorrect_named_correctly_chipped_dir(str): path to folder that will contain all of the incorrectly named images (correctly chippeD)
"""
tile_names = sorted(os.listdir(incorrect_named_correctly_chipped_dir))
for tile_name in tqdm.tqdm(tile_names):
#print(tile_name)
incorrectly_named_img_dir = os.path.join(incorrect_named_correctly_chipped_dir, tile_name, "chips_positive")
incorrectly_named_xml_dir = os.path.join(incorrect_named_correctly_chipped_dir, tile_name, "chips_positive_xml")
incorrectly_named_img_paths = sorted(glob(incorrectly_named_img_dir + "/*.jpg", recursive = True))
if len(incorrectly_named_img_paths) > 0:
#load tile
tile_path = os.path.join(tile_dir, tile_name + ".tif")
tile = cv2.imread(tile_path, cv2.IMREAD_UNCHANGED)
#tile characteristics
tile_height, tile_width, tile_channels = tile.shape #the size of the tile #determine tile dimensions
row_index = math.ceil(tile_height/512) #divide the tile into 512 by 512 chips (rounding up)
col_index = math.ceil(tile_width/512)
#specify path with correct images
img_dir_path = os.path.join(img_anno_directory, tile_name, "chips_positive") ## path positive images
xml_dir_path = os.path.join(img_anno_directory, tile_name, "chips_positive_xml") ## path annotations
for y in range(0, row_index): # rows
for x in range(0, col_index): # cols
t_2_chip = tile_to_chip_array(tile, x, y, int(512)) # load tile to chip
gray_t_2_chip = cv2.cvtColor(t_2_chip.astype(np.uint8), cv2.COLOR_BGR2GRAY) # make gray
                    for incorrectly_named_img_path in list(incorrectly_named_img_paths): #iterate over a copy so matched items can be removed from the original list
gray_incorrectly_named_img = cv2.cvtColor(cv2.imread(incorrectly_named_img_path), cv2.COLOR_BGR2GRAY) #read incorrect image
(score, diff) = compare_ssim(gray_t_2_chip, gray_incorrectly_named_img, full = True)
if score >= 0.95: #Save the same images
                            correct_img_name = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" + '.jpg' # row/col naming convention
                            correct_xml_name = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" + '.xml' # row/col naming convention
incorrect_img_name = os.path.splitext(os.path.basename(incorrectly_named_img_path))[0]
incorrectly_named_img_paths.remove(incorrectly_named_img_path)
#move image and rename
shutil.move(os.path.join(incorrectly_named_img_dir, incorrect_img_name + ".jpg"), #source
os.path.join(img_dir_path, correct_img_name)) #destination
#move anno and rename
shutil.move(os.path.join(incorrectly_named_xml_dir, incorrect_img_name + ".xml"), #source
os.path.join(xml_dir_path, correct_xml_name)) #destination
###########################################################################################################
############################# Identify incorrect/correct images ###########################################
def identify_correct_images(tile_dir, tiles_in_directory,
images_in_directory, images_in_directory_array,
image_directories):
"""
    Find images that align with the tile chip contents (the loops use flipped rows and columns to match the earlier chipping error)
Confirm that the standard tile name in the chip and the contents of the chip match
"""
#index over the tiles with corresponding images in the given directory
tile_names = []
correct_chip_names = []
correct_chip_paths = []
ys = []
xs = []
#correct_0_incorrect_1_images = []
#same_image_counter = 0
for tile_name in tiles_in_directory:
file_name, ext = os.path.splitext(tile_name) # File name
#get tile shape
item_dim = int(512)
tile = cv2.imread(os.path.join(tile_dir, tile_name),cv2.IMREAD_UNCHANGED)
tile_height, tile_width, tile_channels = tile.shape #the size of the tile #determine tile dimensions
row_index = math.ceil(tile_height/512) #divide the tile into 512 by 512 chips (rounding up)
col_index = math.ceil(tile_width/512)
count = 1
for y in range(0, col_index): #rows
for x in range(0, row_index): #cols
chip_name_temp = file_name+ '_' + str(count).zfill(6) + '.jpg'
#create a numpy array of each correctly chipped images
correct_image = tile_to_chip_array(tile, x, y, item_dim)
count += 1
#Identify if images that are contained in the directory of interest
confirmed_chips = [string for string in images_in_directory if chip_name_temp in string]
if len(confirmed_chips) > 0:
for confirmed_chip in confirmed_chips: #there may be duplicate images corresponding to the same standard tile name (nj and ny overlap)
#obtain a numpy array of the image in the directory of interest
index, = np.where(images_in_directory == confirmed_chip)
image_in_directory_array = images_in_directory_array[index[0]] #use the actual value of index (saved as an array)
image_directory = image_directories[index[0]]
##https://pyimagesearch.com/2017/06/19/image-difference-with-opencv-and-python/
#https://pyimagesearch.com/2014/09/15/python-compare-two-images/
gray_image_in_directory_array = cv2.cvtColor(image_in_directory_array, cv2.COLOR_BGR2GRAY)
gray_correct_image = cv2.cvtColor(correct_image, cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gray_image_in_directory_array, gray_correct_image, full=True)
diff = (diff * 255).astype("uint8")
if score >= 0.90:
tile_names.append(tile_name)
xs.append(x)
ys.append(y)
correct_chip_names.append(confirmed_chip)
correct_chip_paths.append(os.path.join(image_directory,confirmed_chip))
if (score < 0.90) and (score >= 0.80):
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.set_title('correct_image')
ax1.imshow(correct_image)
ax2.set_title('labeled_chip_array')
ax2.imshow(image_in_directory_array)
plt.show()
return(tile_names, xs, ys, correct_chip_names, correct_chip_paths)
def identify_incorrect_images(tile_dir, tiles_in_directory,
images_in_directory, images_in_directory_array,
image_directories):
"""
Find images that do not align with the tile chip
Confirm that the standard tile name in the chip and the contents of the chip match
"""
#index over the tiles with corresponding images in the given directory
tile_names = []
incorrect_chip_names = []
incorrect_chip_paths = []
ys = []
xs = []
#correct_0_incorrect_1_images = []
#same_image_counter = 0
for tile_name in tiles_in_directory:
file_name, ext = os.path.splitext(tile_name) # File name
#get tile shape
item_dim = int(512)
tile = cv2.imread(os.path.join(tile_dir, tile_name),cv2.IMREAD_UNCHANGED)
tile_height, tile_width, tile_channels = tile.shape #the size of the tile #determine tile dimensions
row_index = math.ceil(tile_height/512) #divide the tile into 512 by 512 chips (rounding up)
col_index = math.ceil(tile_width/512)
count = 1
for y in range(0, col_index):
for x in range(0, row_index):
chip_name_temp = file_name+ '_' + str(count).zfill(6) + '.jpg'
#create a numpy array of each correctly chipped images
correct_image = tile_to_chip_array(tile, x, y, item_dim)
count += 1
#Identify if images that are contained in the directory of interest
confirmed_chips = [string for string in images_in_directory if chip_name_temp in string]
if len(confirmed_chips) > 0:
for confirmed_chip in confirmed_chips: #there may be duplicate images corresponding to the same standard tile name (nj and ny overlap)
#obtain a numpy array of the image in the directory of interest
index, = np.where(images_in_directory == confirmed_chip)
image_in_directory_array = images_in_directory_array[index[0]] #use the actual value of index (saved as an array)
image_directory = image_directories[index[0]]
##https://pyimagesearch.com/2017/06/19/image-difference-with-opencv-and-python/
#https://pyimagesearch.com/2014/09/15/python-compare-two-images/
gray_image_in_directory_array = cv2.cvtColor(image_in_directory_array, cv2.COLOR_BGR2GRAY)
gray_correct_image = cv2.cvtColor(correct_image, cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gray_image_in_directory_array, gray_correct_image, full=True)
diff = (diff * 255).astype("uint8")
#if score >= 0.90:
# correct_0_incorrect_1_images.append(0)
if score < 0.90:
#print("different image")
tile_names.append(tile_name)
xs.append(x)
ys.append(y)
incorrect_chip_names.append(confirmed_chip)
incorrect_chip_paths.append(os.path.join(image_directory,confirmed_chip))
#print("SSIM: {}".format(score))
#correct_0_incorrect_1_images.append(1)
return(tile_names, xs, ys, incorrect_chip_names, incorrect_chip_paths)
def identify_incorrect_images_simultaneous(tile_dir, tiles_in_directory, images_path):
"""
Find images that do not align with the tile chip
Confirm that the standard tile name in the chip and the contents of the chip match
"""
#index over the tiles with corresponding images in the given directory
tile_names = []
incorrect_chip_names = []
incorrect_chip_paths = []
ys = []
xs = []
#correct_0_incorrect_1_images = []
#same_image_counter = 0
for tile_name in tqdm.tqdm(tiles_in_directory):
file_name, ext = os.path.splitext(tile_name) # File name
#get tile shape
item_dim = int(512)
tile = cv2.imread(os.path.join(tile_dir, tile_name),cv2.IMREAD_UNCHANGED)
tile_height, tile_width, tile_channels = tile.shape #the size of the tile #determine tile dimensions
row_index = math.ceil(tile_height/512) #divide the tile into 512 by 512 chips (rounding up)
col_index = math.ceil(tile_width/512)
count = 1
for y in range(0, col_index):
for x in range(0, row_index):
chip_name_temp = file_name+ '_' + str(count).zfill(6) + '.jpg'
#create a numpy array of each correctly chipped images
correct_image = tile_to_chip_array(tile, x, y, item_dim)
count += 1
#Identify if images that are contained in the directory of interest
labeled_chip_paths = [string for string in images_path if chip_name_temp in string]
if len(labeled_chip_paths) > 0:
for labeled_chip_path in labeled_chip_paths: #there may be duplicate images corresponding to the same standard tile name (nj and ny overlap)
#obtain a numpy array of the image in the directory of interest
index, = np.where(images_path == labeled_chip_path)
labeled_chip_array = cv2.imread(os.path.join(images_path[index[0]]),cv2.IMREAD_UNCHANGED) #open image
##https://pyimagesearch.com/2017/06/19/image-difference-with-opencv-and-python/
#https://pyimagesearch.com/2014/09/15/python-compare-two-images/
gray_labeled_chip_array = cv2.cvtColor(labeled_chip_array, cv2.COLOR_BGR2GRAY)
gray_correct_image = cv2.cvtColor(correct_image, cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gray_labeled_chip_array, gray_correct_image, full=True)
diff = (diff * 255).astype("uint8")
#if score >= 0.90:
# correct_0_incorrect_1_images.append(0)
if score < 0.90:
#print("different image")
tile_names.append(tile_name)
xs.append(x)
ys.append(y)
incorrect_chip_paths.append(labeled_chip_path)
#print("SSIM: {}".format(score))
#correct_0_incorrect_1_images.append(1)
return(tile_names, xs, ys, incorrect_chip_paths) | AST-data-eng | /AST_data_eng-0.0.5.tar.gz/AST_data_eng-0.0.5/AST_data_eng/compare.py | compare.py |
def incorrectly_chipped_image_and_correctly_chipped_names(incorrectly_chipped_images_path, remaining_chips_path, tiles_complete_dataset_path, tile_name):
""" Load tile of interest; chip the tile using the mxm, chip dimensions where m > n; Gather the previous chip name format, and the new chip name format;
save all images, record labeled images that contain relevant data (not black images); save images that were not labeled images;
Args:
incorrectly_chipped_images_path(str): path to folder that will contain all of the incorrect named, images chipped from times
remaining_chips_path(str): path to folder that will contain all of the remaining images that have not been labeled and correspond to tiles that have labeled images
tiles_complete_dataset_path(str): path to folder containing tiles
tile_name(str): name of tile without of extension
Returns:
ys(list): list of row indices
xs(list): list of column indicies
chip_name_incorrectly_chip_names(np array): the name of the images following the previous format for images that contain relevant data
chip_name_correct_chip_names(np array): the name of the images following the previous format for images that contain relevant data
"""
item_dim = int(512)
tile = cv2.imread(os.path.join(tiles_complete_dataset_path, tile_name + ".tif"), cv2.IMREAD_UNCHANGED)
tile_height, tile_width, tile_channels = tile.shape #the size of the tile
row_index = math.ceil(tile_height/512)
col_index = math.ceil(tile_width/512)
#print(row_index, col_index)
chip_name_incorrectly_chip_names = []
chip_name_correct_chip_names = []
ys = []
xs = []
count = 1
for y in range(0, row_index): #rows
for x in range(0, col_index): #cols
chip_img = tile_to_chip_array(tile, x, y, item_dim)
#specify the chip names
chip_name_incorrect_chip_name = tile_name + '_'+ str(count).zfill(6) + '.jpg'
chip_name_correct_chip_name = tile_name + '_' + f"{y:02}" + '_' + f"{x:02}" + '.jpg' # row_cols
if not os.path.exists(os.path.join(incorrectly_chipped_images_path, chip_name_incorrect_chip_name)):
cv2.imwrite(os.path.join(incorrectly_chipped_images_path, chip_name_incorrect_chip_name), chip_img) #save images
#save names of labeled images
if (y < col_index) and (x < col_index): #y:rows already annotated #x:(cols) images that contain relevant data (excludes extraneous annotations of black images)
ys.append(y)#row index
xs.append(x)#col index
chip_name_incorrectly_chip_names.append(chip_name_incorrect_chip_name)
                chip_name_correct_chip_names.append(chip_name_correct_chip_name) # row/col naming convention
#save remaining images
if (y >= col_index): #y: we started at 0 here and 1 before? (save the remaining rows) #x:do not include extraneous black images
if not os.path.exists(os.path.join(remaining_chips_path, chip_name_correct_chip_name)):
cv2.imwrite(os.path.join(remaining_chips_path, chip_name_correct_chip_name), chip_img)
#counter for image pathway
count += 1
chip_name_incorrectly_chip_names = np.array(chip_name_incorrectly_chip_names)
chip_name_correct_chip_names = np.array(chip_name_correct_chip_names)
return(ys,xs,chip_name_incorrectly_chip_names, chip_name_correct_chip_names)
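# Example usage sketch (hypothetical paths and tile name): rechip one tile, writing every chip under
# its previous six-digit name and saving never-labeled rows under the new row/col name.
# ys, xs, old_names, new_names = incorrectly_chipped_image_and_correctly_chipped_names(
#     "rechipping/incorrectly_chipped_images", "rechipping/remaining_chips",
#     "complete_dataset/tiles", "m_3807537_ne_18_1_20170611")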
def reformat_xmls_for_rechipped_images(xml_directory, image_in_tile, correct_xml_name, correct_jpg_name, chips_positive_xml_dir_path):
""" reformat xml files for rechipped images to include resolution, year, updated filename, and updated path.
Args:
xml_directory(str): directory holding xmls
image_in_tile(str): path to image
correct_xml_name: correct name for xml
correct_jpg_name: correct name for jpgs
chips_positive_xml_dir_path(str): new path for xml
https://docs.python.org/3/library/xml.etree.elementtree.html
https://stackoverflow.com/questions/28813876/how-do-i-get-pythons-elementtree-to-pretty-print-to-an-xml-file
"""
#load xml
formatted_chip_name_wo_ext = os.path.splitext(os.path.basename(image_in_tile))[0]
tree = et.parse(os.path.join(xml_directory, formatted_chip_name_wo_ext +".xml"))
root = tree.getroot()
#add resolution to xml
resolution = et.Element("resolution")
resolution.text = formatted_chip_name_wo_ext.split("_")[1] #resolution
et.indent(tree, space="\t", level=0)
root.insert(3, resolution)
#add year to xml
year = et.Element("year")
year.text = formatted_chip_name_wo_ext.split("_")[2]#year
et.indent(tree, space="\t", level=0)
root.insert(4,year)
#correct spacing for source (dataset name)
et.indent(tree, space="\t", level=0)
#correct filename and path to formatting with row/col coordinates
for filename in root.iter('filename'):
        filename.text = correct_jpg_name #the filename element references the image (jpg), matching the path element below
for path in root.iter('path'):
path.text = os.path.join(xml_directory, correct_jpg_name)
tree.write(os.path.join(chips_positive_xml_dir_path, correct_xml_name))
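# Example usage sketch (hypothetical names): rewrite a single annotation so its filename/path follow the
# row/col naming convention and carry the resolution and year parsed from the old formatted image name.
# reformat_xmls_for_rechipped_images("verified/set_1/chips_positive_xml",
#                                    "verified/set_1/chips_positive/ny_060cm_2017_20170902_m_3807537_ne_18_1_20170611_000023.jpg",
#                                    "m_3807537_ne_18_1_20170611_07_03.xml", "m_3807537_ne_18_1_20170611_07_03.jpg",
#                                    "compiled/m_3807537_ne_18_1_20170611/chips_positive_xml")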
def copy_rename_labeled_images_xmls(xml_directory, images_in_tile, incorrectly_chipped_images_path, chips_positive_dir_path,
chips_positive_xml_dir_path, chip_name_incorrectly_chip_names, chip_name_correct_chip_names,
multiple_annotations_images, black_images_with_annotations):
""" reformat xml files for rechipped images to include resolution, year, updated filename, and updated path.
Args:
xml_directory(str): directory holding xmls
image_in_tile(str): path to image
incorrectly_chipped_images_path(str): path to folder that will contain all of the incorrect named, images chipped from times
chips_positive_dir_path(str): new path for jpg
chips_positive_xml_dir_path(str): new path for xml
chip_name_incorrectly_chip_names(np array): the name of the images following the previous format for images that contain relevant data
chip_name_correct_chip_names(np array): the name of the images following the previous format for images that contain relevant data
multiple_annotations_images: list of images with multiple annotations in a given folder
black_images_with_annotations: list of black images with annotations
Return:
ultiple_annotations_images, black_images_with_annotations
"""
for image_in_tile in images_in_tile:
#get the standard image name
formatted_image_name = os.path.basename(image_in_tile)
standard_image_name = formatted_image_name.split("_",4)[-1]
#get the index of the image array of image names #print(standard_image_name)
index, = np.where(chip_name_incorrectly_chip_names == standard_image_name)
if len(index) == 0: #If there there is no matching annotation (black image)
black_images_with_annotations.append(image_in_tile)
elif len(index) >= 1: #If there is a match
#make sure the image in the folder is correct
gray_labeled_image = cv2.cvtColor(cv2.imread(image_in_tile), cv2.COLOR_BGR2GRAY) #image that had been labeled
incorrectly_chipped_image_path = os.path.join(incorrectly_chipped_images_path, chip_name_incorrectly_chip_names[index[0]])
gray_known_image = cv2.cvtColor(cv2.imread(incorrectly_chipped_image_path), cv2.COLOR_BGR2GRAY) #image that has been chipped from tile
(score, diff) = compare_ssim(gray_labeled_image, gray_known_image, full=True)
if score >= 0.90: #If the labeled image is correct
#chip_name_incorrectly_chip_names[index]
correct_xml_name = os.path.splitext(chip_name_correct_chip_names[index[0]])[0] + ".xml"
correct_jpg_name = os.path.splitext(chip_name_correct_chip_names[index[0]])[0] + ".jpg"
#copy image
shutil.copyfile(incorrectly_chipped_image_path, os.path.join(chips_positive_dir_path, correct_jpg_name))
#create renamed xml
reformat_xmls_for_rechipped_images(xml_directory, image_in_tile, correct_xml_name, correct_jpg_name, chips_positive_xml_dir_path)
if len(index) > 1: #record images with multiple annotations
multiple_annotations_images.append(image_in_tile)
return(multiple_annotations_images, black_images_with_annotations)
def img_anno_paths_to_corrected_names_for_labeled_images_and_remaining_images(img_paths_anno_paths, correct_directory, incorrectly_chipped_images_path,
remaining_chips_path, tiles_complete_dataset_path):
""" iterate over all the image and xml paths in directory of annotated images; identify tiles and the corresonding images/xmls in each folder;
match name of previous naming convention and row/col naming convention;for labeled images and xmls, create folder to store,
identify correct images, copy, and rename; identify and save remaining images;
Args:
img_paths_anno_paths(np array): n x 2 array of jpg and xml paths
correct_directory(str): path to directory containing xmls and images with correct names
incorrectly_chipped_images_path(str): path to folder that will contain all of the incorrect named, images chipped from times
remaining_chips_path(str): path to folder that will contain all of the remaining images that have not been labeled and correspond to tiles that have labeled images
tiles_complete_dataset_path(str): path to folder containing tiles
Returns:
multiple_annotations_images: list of images with multiple annotations in a given folder
black_images_with_annotations: list of black images with annotations
"""
multiple_annotations_images = []
black_images_with_annotations = []
for directory in tqdm.tqdm(img_paths_anno_paths):
#get all the image and xml paths in directory of annotated images
print(directory)
remove_thumbs(directory[0])
image_paths = sorted(glob(directory[0] + "/*.jpg", recursive = True))
xml_paths = sorted(glob(directory[1] + "/*.xml", recursive = True))
#print(len(image_paths),len(xml_paths))
#identify tiles in each folder
tiles = []
for image in image_paths:
image_name = os.path.splitext(os.path.basename(image))[0]
if image_name.count("_") > 9:
tile_name = image_name.split("_",4)[-1].rsplit("_",1)[0] #state included in image name
else:
tile_name = image_name.rsplit("_",2)[0] #tile name formated image name
tiles.append(tile_name)
tiles = sorted(np.unique(tiles))
#identify the images/xmls that correspond with each tile in folder
for tile_name in tiles:
images_in_tile = [string for string in image_paths if tile_name in string]
xmls_in_tile = [string for string in xml_paths if tile_name in string]
assert len(images_in_tile) == len(xmls_in_tile), "The same number of images and xmls"
#print(tile_name, len(images_in_tile))
#create folder to store corrected chips/xmls
tile_dir_path = os.path.join(correct_directory, tile_name) #sub folder for each tile
chips_positive_dir_path = os.path.join(tile_dir_path,"chips_positive") #images path
chips_positive_xml_dir_path = os.path.join(tile_dir_path,"chips_positive_xml") #xmls paths
tile_dir = os.makedirs(tile_dir_path, exist_ok=True)
chips_positive_dir = os.makedirs(chips_positive_dir_path, exist_ok=True)
chips_positive_xml_dir = os.makedirs(chips_positive_xml_dir_path, exist_ok=True)
#identify and save remaining images; match name of previous naming convention and row/col naming convention
ys, xs, chip_name_incorrectly_chip_names, chip_name_correct_chip_names = incorrectly_chipped_image_and_correctly_chipped_names(incorrectly_chipped_images_path, remaining_chips_path, tiles_complete_dataset_path, tile_name)
#identify labeled images that are correct; copy and rename correct images and xmls
multiple_annotations_images, black_images_with_annotations = copy_rename_labeled_images_xmls(directory[1], images_in_tile, incorrectly_chipped_images_path,
chips_positive_dir_path, chips_positive_xml_dir_path,
chip_name_incorrectly_chip_names, chip_name_correct_chip_names,
multiple_annotations_images, black_images_with_annotations)
        #remaining images
        print("remaining images", len(os.listdir(remaining_chips_path)))
    return(multiple_annotations_images, black_images_with_annotations)
################################################################
############# Format tiles in tiles folder ###################
def formatted_tile_name_to_standard_tile_name(tile_name):
    #format tile_names to only include initial capture date 1/20
tile_name = os.path.splitext(tile_name.split("_",4)[4])[0]
if tile_name.count("_") > 5:
tile_name = tile_name.rsplit("_",1)[0]
tile_name_with_ext = tile_name + ".tif"
return(tile_name_with_ext)
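# Example sketch (hypothetical file name): a formatted tile name with the state/resolution/year prefix
# and a trailing capture date collapses to the bare quad name plus ".tif".
# formatted_tile_name_to_standard_tile_name("ny_060cm_2017_20170902_m_3807537_ne_18_1_20170611_20171018.tif")
# # -> "m_3807537_ne_18_1_20170611.tif"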
def rename_formatted_tiles(tiles_complete_dataset_path):
"""
"""
for tile in os.listdir(tiles_complete_dataset_path):
        #format tile_names to only include initial capture date 1/20
if tile.count("_") > 5:
old_tile_path = os.path.join(tiles_complete_dataset_path, tile)
new_tile_name = formatted_tile_name_to_standard_tile_name(tile)
new_tile_path = os.path.join(tiles_complete_dataset_path, new_tile_name)
if not os.path.exists(new_tile_path): #If the new tile path does not exist, convert the tile to standard format
os.rename(old_tile_path, new_tile_path)
if os.path.exists(new_tile_path) and os.path.exists(old_tile_path): #If the new tile path already exists, delete the old tile path (if it still exists)
os.remove(old_tile_path)
#remove_thumbs(os.path.join(parent_directory, complete_dataset_path,"tiles"))
#rename_formatted_tiles(os.path.join(parent_directory, complete_dataset_path, "tiles"))
def formatted_chip_names_to_standard_names(chip):
"""
"""
chip = os.path.splitext(chip)[0]#remove ext
chip_name, chip_number = chip.rsplit("_",1)
tile_name = chip_name.split("_",4)[4]
if tile_name.count("_") > 5:
tile_name = tile_name.rsplit("_",1)[0]
standard_chip_name = tile_name + "_"+ chip_number
return(standard_chip_name)
def rename_formatted_chips_images_xmls(complete_dataset_path):
"""
Rename chips (jps/xmls)
"""
positive_images_path = os.path.join(complete_dataset_path,"chips_positive")
for chip in os.listdir(positive_images_path):
        #format tile_names to only include initial capture date 1/20
if chip.count("_") > 6:
#old path
old_chip_path = os.path.join(positive_images_path, chip)
#new name
new_chip_name = formatted_chip_names_to_standard_names(chip)
#copy images
if not os.path.exists(os.path.join(complete_dataset_path,"standard_chips_positive", new_chip_name+".jpg")): #If the new tile path does not exist, convert the tile to standard format
shutil.copyfile(old_chip_path, os.path.join(complete_dataset_path,"standard_chips_positive", new_chip_name+".jpg"))
elif not os.path.exists(os.path.join(complete_dataset_path,"dups_chips_positive", new_chip_name+".jpg")): #If the new tile path does not exist, convert the tile to standard format
shutil.copyfile(old_chip_path, os.path.join(complete_dataset_path,"dups_chips_positive", new_chip_name+".jpg"))
else:
print("so many dups")
            #copy annotations (use the corresponding xml as the source, not the jpg; xmls are assumed to live in chips_positive_xml)
            old_xml_path = os.path.join(complete_dataset_path, "chips_positive_xml", os.path.splitext(chip)[0] + ".xml")
            if not os.path.exists(os.path.join(complete_dataset_path,"standard_chips_positive_xml", new_chip_name+".xml")): #If the new xml path does not exist, copy the xml to the standard folder
                shutil.copyfile(old_xml_path, os.path.join(complete_dataset_path,"standard_chips_positive_xml", new_chip_name+".xml"))
            elif not os.path.exists(os.path.join(complete_dataset_path,"dups_chips_positive_xml", new_chip_name+".xml")): #Otherwise copy the xml to the duplicates folder
                shutil.copyfile(old_xml_path, os.path.join(complete_dataset_path,"dups_chips_positive_xml", new_chip_name+".xml"))
else:
print("so many dups")
#if os.path.exists(new_tile_path) and os.path.exists(old_tile_path): #If the new tile path already exists, delete the old tile path (if it still exists)
# os.remove(old_tile_path)
###
def identify_verified_jpgs_missing_annotations(verified_sets_parent_dir, verified_set_dir):
"""
Args:
verified_sets_parent_dir(str): Name of the parent folder holding verified images; Ex:"verified/verified_sets"
verified_set_dir(str): Name of verified set folder containing images without corresponding annotations; Ex:"verify_jaewon_poonacha_cleave_1"
Return:
jpgs_missing_xmls(list): list of jpgs without corresponding annotations in the verified folder of interest
jpgs_missing_xmls_path(list): list of paths containing xmls matching the jpgs missing annotations
"""
#get the xml ids w/o the ext
xmls = os.listdir(os.path.join(parent_directory,verified_sets_parent_dir, verified_set_dir, "chips_positive_xml"))
xmls_without_ext = []
for xml in xmls:
xmls_without_ext.append(os.path.splitext(xml)[0])
#get the jpg ids w/o the ext
jpgs = os.listdir(os.path.join(parent_directory, verified_sets_parent_dir, verified_set_dir,"chips_positive"))
jpgs_without_ext = []
for jpg in jpgs:
jpgs_without_ext.append(os.path.splitext(jpg)[0])
    #identify jpgs that are missing xmls
    jpgs_missing_xmls = []
    for jpg in jpgs_without_ext:
        if jpg not in xmls_without_ext:
            jpgs_missing_xmls.append(jpg)
#identify possible xml path
all_xmls = glob(parent_directory + "/**/*.xml", recursive = True)
jpgs_missing_xmls_path =[]
for jpg in jpgs_missing_xmls:
jpg_path = [string for string in all_xmls if jpg in string]
if len(jpg_path) > 0:
jpgs_missing_xmls_path.append(jpg_path)
return(jpgs_missing_xmls, jpgs_missing_xmls_path)
####################### Identify Duplicates#####################################################################
def unique_by_first_dimension(a, images):
#https://stackoverflow.com/questions/41071116/how-to-remove-duplicates-from-a-3d-array-in-python
tmp = a.reshape(a.shape[0], -1)
b = np.ascontiguousarray(tmp).view(np.dtype((np.void, tmp.dtype.itemsize * tmp.shape[1])))
_, idx = np.unique(b, return_index=True)
unique_images = images[idx]
u, c = np.unique(b, return_counts=True)
dup = u[c > 1]
duplicate_images = images[np.where(np.isin(b,dup))[0]]
return(unique_images, duplicate_images)
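# Minimal usage sketch (hypothetical folder name, not from the source): separate pixel-wise unique chips from
# duplicated chips by comparing the raw image arrays.
#   images, image_array, _ = positive_images_to_array("complete_dataset/chips_positive") #positive_images_to_array is defined later in this module
#   unique_images, duplicate_images = unique_by_first_dimension(image_array, images)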
def intersection_of_three_arrays(arr1, arr2, arr3): #NOTE: the original function name was truncated in the source; a descriptive name is used here
# Converting the arrays into sets
s1 = set(arr1)
s2 = set(arr2)
s3 = set(arr3)
# Calculates intersection of sets on s1 and s2
set1 = s1.intersection(s2) #[80, 20, 100]
# Calculates intersection of sets on set1 and s3
result_set = set1.intersection(s3)
# Converts resulting set to list
final_list = list(result_set)
print(len(final_list))
return(final_list)
def move_images(old_image_dir, new_image_dir, image_names):
#Ensure directory exists
os.makedirs(new_image_dir, exist_ok = True)
#move images
for image in image_names:
shutil.copyfile(os.path.join(old_image_dir,image),
os.path.join(new_image_dir,image))
def sorted_list_of_files(dups_chips_positive_path):
#https://thispointer.com/python-get-list-of-files-in-directory-sorted-by-size/#:~:text=order%20by%20size%3F-,Get%20list%20of%20files%20in%20directory%20sorted%20by%20size%20using,%2C%20using%20lambda%20x%3A%20os.
#Get list of files in directory sorted by size using os.listdir()
#list_of_files = filter( lambda x: os.path.isfile(os.path.join(dir_name, x)), os.listdir(dir_name) )
# Sort list of file names by size
#list_of_files = sorted( list_of_files,key = lambda x: os.stat(os.path.join(dir_name, x)).st_size)
sizes = []
dup_images = np.array(os.listdir(dups_chips_positive_path))
for image in dup_images:
sizes.append(os.stat(os.path.join(dups_chips_positive_path,image)).st_size)
sizes = np.array(sizes)
df = pd.DataFrame({'dups': dup_images,
'sizes': sizes})
df = df.sort_values(by=['sizes'])
df.to_csv('dup tile names.csv')
return(df)
def list_of_lists_positive_chips(chips_positive_path):
positive_chips = os.listdir(chips_positive_path)
positive_chips_lists = [positive_chips[x:x+1000] for x in range(0, len(positive_chips), 1000)]
return(positive_chips_lists)
def directory_tile_names(directory, output_file_name):
tiles = []
for image in os.listdir(directory):
img = os.path.splitext(image)[0] #name of tif with the extension removed
tile = img.rsplit("_",1)[0]
#print(tile.split("_",4)[4])
#tile = tile.split("_",4)[4] #get the tile names to remove duplicates from being downloaded
tiles.append(tile)
tiles = np.unique(tiles)
pd.DataFrame(tiles, columns = [output_file_name]).to_csv(output_file_name+'.csv')
def identify_all_paths_to_duplicate_images(parent_directory, duplicate_images):
entire_jpg = glob(parent_directory + "/**/*.jpg", recursive = True)
full_path = []
jpg_name = []
for jpg in entire_jpg:
if jpg.rsplit("\\")[-1] in duplicate_images:
full_path.append(jpg)
jpg_name.append(jpg.rsplit("\\")[-1])
df = pd.DataFrame({'jpg name': jpg_name,
'full path': full_path})
df.to_csv("duplicate_jpgs_full_path.csv")
def get_tile_names_from_chip_names(directory):
remove_thumbs(directory)
tile_names = []
for chip_name in os.listdir(directory):
chip_name = os.path.splitext(chip_name)[0]#remove ext
chip_name, _ = chip_name.rsplit("_",1)
tile_names.append(chip_name.split("_",4)[4] + ".tif")
return(np.unique(tile_names))
def positive_images_to_array(images_dir_path):
images = np.array(os.listdir(os.path.join(images_dir_path)))
image_array = np.zeros((len(images),512,512, 3), dtype='uint8')
image_directory = np.array([images_dir_path] *len(images))
for num in range(len(images)):
image = cv2.imread(os.path.join(images_dir_path, images[num]),cv2.IMREAD_UNCHANGED) #open image
image_array[num,:,:,:] = image
return(images, image_array, image_directory)
def positive_images_to_array_rgb(images_dir_path):
images = np.array(os.listdir(os.path.join(images_dir_path)))
imgsr = np.zeros((len(images),512,512), dtype='uint8')
imgsg = np.zeros((len(images),512,512), dtype='uint8')
imgsb = np.zeros((len(images),512,512), dtype='uint8')
for num in range(len(images)):
image = cv2.imread(os.path.join(images_dir_path, images[num]),cv2.IMREAD_UNCHANGED) #open image
imgsr[num,:,:] = image[:,:,0]
imgsg[num,:,:] = image[:,:,1]
        imgsb[num,:,:] = image[:,:,2]
return(images, imgsr, imgsg, imgsb)
def positive_images_to_array_correctly_labeled(images_dir_path, incorrect_labeled_chip_names_by_subfolder):
"""
For every image in the given subdirectory, the name and image contents have been verified and incorrect images have been identified
This function identifies the images that are correctly labeled
get image array for correctly labeled images
Args:
images_dir_path(str)
incorrect_labeled_chip_names_by_subfolder(list)
"""
subfolders_files = os.listdir(images_dir_path)
for chip in incorrect_labeled_chip_names_by_subfolder["incorrect_chip_names"].tolist():
if chip in subfolders_files:
subfolders_files.remove(chip)
correctly_labeled_images = np.array(subfolders_files) #image names
correctly_labeled_image_array = np.zeros((len(correctly_labeled_images),512,512, 3), dtype='uint8') #image contents
correctly_labeled_image_directory = np.array([images_dir_path] * len(correctly_labeled_images)) #image directory
for num in range(len(correctly_labeled_images)):
correctly_labeled_image = cv2.imread(os.path.join(images_dir_path, correctly_labeled_images[num]),cv2.IMREAD_UNCHANGED) #open image
correctly_labeled_image_array[num,:,:,:] = correctly_labeled_image
return(correctly_labeled_images, correctly_labeled_image_array, correctly_labeled_image_directory) | AST-data-eng | /AST_data_eng-0.0.5.tar.gz/AST_data_eng-0.0.5/AST_data_eng/misc.py | misc.py |
def image_tile_characteristics(images_and_xmls_by_tile_path, tiles_dir):#, tile_name_tile_url, verified_positive_jpgs):
tile_names_by_tile = []
tile_paths_by_tile = []
#tile_urls_by_tile = []
tile_heights = []
tile_widths = []
tile_depths = []
min_utmx_tile = [] #NW_coordinates
min_utmy_tile = [] #NW_coordinates
max_utmx_tile = [] #SE_coordinates
max_utmy_tile = [] #SE_coordinates
utm_projection_tile = []
min_lon_tile = [] #NW_coordinates
min_lat_tile = [] #NW_coordinates
max_lon_tile = [] #SE_coordinates
max_lat_tile = [] #SE_coordinates
chip_names = []
tile_names_by_chip = []
tile_paths_by_chip = []
#tile_urls_by_chip = []
minx_pixel = []
miny_pixel = []
maxx_pixel = []
maxy_pixel = []
min_lon_chip = [] #NW_coordinates
min_lat_chip = [] #NW_coordinates
max_lon_chip = [] #SE_coordinates
max_lat_chip = [] #SE_coordinates
min_utmx_chip = [] #NW_coordinates
min_utmy_chip = [] #NW_coordinates
max_utmx_chip = [] #SE_coordinates
max_utmy_chip = [] #SE_coordinates
utm_projection_chip = []
row_indicies = []
col_indicies = []
image_paths = []
xml_paths = []
item_dim = int(512)
folders_of_images_xmls_by_tile = os.listdir(images_and_xmls_by_tile_path)
for tile_name in tqdm.tqdm(folders_of_images_xmls_by_tile):
#specify image/xml paths for each tile
positive_image_dir = os.path.join(images_and_xmls_by_tile_path, tile_name, "chips_positive")
remove_thumbs(positive_image_dir)
positive_xml_dir = os.path.join(images_and_xmls_by_tile_path, tile_name, "chips_positive_xml")
#load a list of images/xmls for each tile
positive_images = os.listdir(positive_image_dir)
positive_xmls = os.listdir(positive_xml_dir)
#read in tile
tile_path = os.path.join(tiles_dir, tile_name + ".tif")
#obtain tile url
#tile name/path/urls by tile
tile_names_by_tile.append(tile_name)
tile_paths_by_tile.append(tile_path)
#tile_url = [string for string in tile_name_tile_url[:,1] if tile_name in string][0]
#tile_urls_by_tile.append(tile_url)
#determine the utm coords for each tile
utmx, utmy, tile_band, tile_height, tile_width = tile_dimensions_and_utm_coords(tile_path)
tile_heights.append(tile_height)
tile_widths.append(tile_width)
tile_depths.append(tile_band)
min_utmx_tile.append(utmx[0]) #NW_coordinates
min_utmy_tile.append(utmy[0]) #NW_coordinates
max_utmx_tile.append(utmx[-1]) #SE_coordinates
max_utmy_tile.append(utmy[-1]) #SE_coordinates
utm_proj = get_utm_proj(tile_path)
utm_projection_tile.append(utm_proj)
minlon, minlat = transform_point_utm_to_wgs84(utm_proj, utmx[0], utmy[0])
maxlon, maxlat = transform_point_utm_to_wgs84(utm_proj, utmx[-1], utmy[-1])
min_lon_tile.append(minlon) #NW_coordinates
min_lat_tile.append(minlat) #NW_coordinates
max_lon_tile.append(maxlon) #SE_coordinates
max_lat_tile.append(maxlat) #SE_coordinates
for positive_image in positive_images: #iterate over each image affiliated with a given tile
#tile and chip names
chip_name = os.path.splitext(positive_image)[0]
chip_names.append(chip_name) # The index is a six-digit number like '000023'.
tile_names_by_chip.append(tile_name)
#path
tile_paths_by_chip.append(tile_path)
#tile_urls_by_chip.append(tile_url)
image_paths.append(os.path.join(positive_image_dir, positive_image))
xml_paths.append(os.path.join(positive_xml_dir, chip_name +".xml"))
#row/col indicies
y, x = chip_name.split("_")[-2:] #name of tif with the extension removed; y=row;x=col
y = int(y)
x = int(x)
row_indicies.append(y)
col_indicies.append(x)
#get the pixel coordinates (indexing starting at 0)
minx = x*item_dim
miny = y*item_dim
maxx = (x+1)*item_dim - 1
maxy = (y+1)*item_dim - 1
if maxx > tile_width:
maxx = tile_width - 1
if maxy > tile_height:
maxy = tile_height - 1
minx_pixel.append(minx) #NW (max: Top Left) # used for numpy crop
miny_pixel.append(miny) #NW (max: Top Left) # used for numpy crop
maxx_pixel.append(maxx) #SE (min: Bottom right)
maxy_pixel.append(maxy) #SE (min: Bottom right)
#determine the lat/lon
min_lon, min_lat = transform_point_utm_to_wgs84(utm_proj, utmx[minx], utmy[miny])
max_lon, max_lat = transform_point_utm_to_wgs84(utm_proj, utmx[maxx], utmy[maxy])
min_utmx_chip.append(utmx[minx]) #NW_coordinates
min_utmy_chip.append(utmy[miny]) #NW_coordinates
max_utmx_chip.append(utmx[maxx]) #SE_coordinates
max_utmy_chip.append(utmy[maxy]) #SE_coordinates
utm_projection_chip.append(utm_proj)
min_lon_chip.append(min_lon) #NW (max: Top Left) # used for numpy crop
min_lat_chip.append(min_lat) #NW (max: Top Left) # used for numpy crop
max_lon_chip.append(max_lon) #SE (min: Bottom right)
max_lat_chip.append(max_lat) #SE (min: Bottom right)
tile_characteristics = pd.DataFrame(data={'tile_name': tile_names_by_tile, 'tile_path': tile_paths_by_tile, #'tile_url': tile_urls_by_tile,
'tile_heights': tile_heights, 'tile_widths': tile_widths, 'tile_bands': tile_depths, 'min_utmx': min_utmx_tile, 'min_utmy': min_utmy_tile,
'max_utmx': max_utmx_tile, 'max_utmy': max_utmy_tile, 'utm_projection': utm_projection_tile,
'min_lon_tile': min_lon_tile,'min_lat_tile': min_lat_tile,'max_lon_tile': max_lon_tile,'max_lat_tile': max_lat_tile})
image_characteristics = pd.DataFrame(data={'chip_name': chip_names, 'image_path': image_paths, 'xml_path': xml_paths,'tile_name': tile_names_by_chip,
'row_indicies': row_indicies, 'col_indicies': col_indicies,'tile_path': tile_paths_by_chip, #'tile_url': tile_urls_by_chip,
'minx_pixel': minx_pixel, 'miny_pixel': miny_pixel, 'maxx_pixel': maxx_pixel,'maxy_pixel': maxy_pixel, 'utm_projection': utm_projection_chip,
'min_utmx': min_utmx_chip, 'min_utmy': min_utmy_chip, 'max_utmx': max_utmx_chip, 'max_utmy': max_utmy_chip,
'min_lon_chip': min_lon_chip,'min_lat_chip': min_lat_chip,'max_lon_chip': max_lon_chip, 'max_lat_chip': max_lat_chip})
tile_characteristics.to_csv("tile_characteristics.csv")
image_characteristics.to_csv("image_characteristics.csv")
return(tile_characteristics, image_characteristics) | AST-data-eng | /AST_data_eng-0.0.5.tar.gz/AST_data_eng-0.0.5/AST_data_eng/data_characteristics.py | data_characteristics.py |
def create_tile_xml(tile_name, xml_directory, tile_resolution, tile_year,
tile_width, tile_height, tile_band):
tile_name_ext = tile_name + ".tif"
root = et.Element("annotation")
folder = et.Element("folder") #add folder to xml
folder.text = "tiles" #folder
root.insert(0, folder)
filename = et.Element("filename") #add filename to xml
filename.text = tile_name_ext #filename
root.insert(1, filename)
path = et.Element("path") #add path to xml
path.text = os.path.join(xml_directory, tile_name_ext) #path
root.insert(2, path)
resolution = et.Element("resolution") #add resolution to xml
resolution.text = tile_resolution #resolution
root.insert(3, resolution)
year = et.Element("year") #add year to xml
year.text = tile_year #year
root.insert(4,year)
source = et.Element("source") #add database to xml
database = et.Element("database")
database.text = "Tile Level Annotation" #
source.insert(0, database)
root.insert(5,source)
size = et.Element("size") #add size to xml
width = et.Element("width")
width.text = str(tile_width) #width
size.insert(0, width)
height = et.Element("height")
height.text = str(tile_height) #height
size.insert(1, height)
depth = et.Element("depth")
depth.text = str(tile_band) #depth
size.insert(2, depth)
root.insert(6,size)
tree = et.ElementTree(root)
et.indent(tree, space="\t", level=0)
#tree.write("filename.xml")
tree.write(os.path.join(xml_directory, tile_name +".xml"))
def add_objects(xml_directory, tile_name, obj_class,
obj_truncated, obj_difficult, obj_chip_name,
obj_xmin, obj_ymin, obj_xmax, obj_ymax):
tree = et.parse(os.path.join(xml_directory, tile_name + ".xml"))
root = tree.getroot()
obj = et.Element("object") #add size to xml
name = et.Element("name") #class
name.text = str(obj_class)
obj.insert(0, name)
pose = et.Element("pose") #pose
pose.text = "Unspecified"
obj.insert(1, pose)
truncated = et.Element("truncated")
truncated.text = str(obj_truncated) #
obj.insert(2, truncated)
difficult = et.Element("difficult")
difficult.text = str(obj_difficult)
obj.insert(3, difficult)
chip_name = et.Element("chip_name")
chip_name.text = str(obj_chip_name)
obj.insert(4, chip_name)
bndbox = et.Element("bndbox") #bounding box
xmin = et.Element("xmin") #xmin
xmin.text = str(obj_xmin)
bndbox.insert(0, xmin)
ymin = et.Element("ymin") #ymin
ymin.text = str(obj_ymin)
bndbox.insert(1, ymin)
xmax = et.Element("xmax") #xmax
xmax.text = str(obj_xmax)
bndbox.insert(2, xmax)
ymax = et.Element("ymax") #ymax
ymax.text = str(obj_ymax)
bndbox.insert(3, ymax)
obj.insert(5, bndbox)
root.append(obj)
tree = et.ElementTree(root)
et.indent(tree, space="\t", level=0)
tree.write(os.path.join(xml_directory, tile_name +".xml"))
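# For reference, the tile-level XML written by create_tile_xml()/add_objects() above has the following layout
# (the values shown are placeholders, not taken from the source data):
# <annotation>
#     <folder>tiles</folder>
#     <filename>TILE_NAME.tif</filename>
#     <path>XML_DIRECTORY/TILE_NAME.tif</path>
#     <resolution>...</resolution>
#     <year>...</year>
#     <source><database>Tile Level Annotation</database></source>
#     <size><width>...</width><height>...</height><depth>...</depth></size>
#     <object>
#         <name>closed_roof_tank</name>
#         <pose>Unspecified</pose>
#         <truncated>0</truncated>
#         <difficult>0</difficult>
#         <chip_name>TILE_NAME_ROW_COL</chip_name>
#         <bndbox><xmin>...</xmin><ymin>...</ymin><xmax>...</xmax><ymax>...</ymax></bndbox>
#     </object>
# </annotation>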
def generate_tile_xmls(images_and_xmls_by_tile_path, tiles_dir, tiles_xml_path, item_dim):
folders_of_images_xmls_by_tile = os.listdir(images_and_xmls_by_tile_path)
for tile_name in tqdm.tqdm(folders_of_images_xmls_by_tile):
tile_name_ext = tile_name + ".tif"
#get tile dimensions ##replace with information from tile characteristics
da = rioxarray.open_rasterio(os.path.join(tiles_dir, tile_name_ext))
tile_band, tile_height, tile_width = da.shape[0], da.shape[1], da.shape[2]
#specify image/xml paths for each tile
positive_image_dir = os.path.join(images_and_xmls_by_tile_path, tile_name, "chips_positive")
positive_xml_dir = os.path.join(images_and_xmls_by_tile_path, tile_name, "chips_positive_xml")
#load a list of images/xmls for each tile
positive_images = os.listdir(positive_image_dir)
positive_xmls = os.listdir(positive_xml_dir)
for index, chip_xml in enumerate(positive_xmls):
#identify rows and columns
chip_name = os.path.splitext(chip_xml)[0]
y, x = chip_name.split("_")[-2:] #name of tif with the extension removed; y=row;x=col
y = int(y)
x = int(x)
minx = x*item_dim
miny = y*item_dim
#load each xml
tree = et.parse(os.path.join(positive_xml_dir, chip_xml))
root = tree.getroot()
#create the tile xml
if index == 0:
resolution = root.find('resolution').text
year = root.find('year').text
create_tile_xml(tile_name, tiles_xml_path, resolution, year,
tile_width, tile_height, tile_band)
#add the bounding boxes
for obj in root.iter('object'):
xmlbox = obj.find('bndbox')
obj_xmin = str(int(xmlbox.find('xmin').text) + minx)
obj_xmax = str(int(xmlbox.find('xmax').text) + minx)
obj_ymin = str(int(xmlbox.find('ymin').text) + miny)
obj_ymax = str(int(xmlbox.find('ymax').text) + miny)
add_objects(tiles_xml_path, tile_name, obj.find('name').text, obj.find('truncated').text,
obj.find('difficult').text, chip_name, obj_xmin, obj_ymin, obj_xmax, obj_ymax)
###################################################################################################################
#################################### Merge tile level XMLs to GDF ###########################################
###################################################################################################################
#Generate two text boxes a larger one that covers them
def merge_boxes(bbox1, bbox2):
""" Generate a bounding box that covers two bounding boxes
Arg:
        bbox1(list): a list of the (xmin, ymin, xmax, ymax) coordinates for box 1
        bbox2(list): a list of the (xmin, ymin, xmax, ymax) coordinates for box 2
    Returns:
        merged_bbox(list): a list of the (xmin, ymin, xmax, ymax) coordinates for the merged bbox
"""
return [min(bbox1[0], bbox2[0]),
min(bbox1[1], bbox2[1]),
max(bbox1[2], bbox2[2]),
max(bbox1[3], bbox2[3])]
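# Worked example (hypothetical coordinates): merge_boxes([10, 12, 40, 50], [30, 8, 60, 45]) returns
# [10, 8, 60, 50], the smallest box that covers both input boxes.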
#Computer a Matrix similarity of distances of the text and object
def calc_sim(bbox1, bbox2, dist_limit):
"""Determine the similarity of distances between bboxes to determine whether bboxes should be merged
Arg:
bbox1(list): a list of the (xmin, ymin, xmax, ymax) coordinates for box 1
bbox2(list): a list of the (xmin, ymin, xmax, ymax) coordinates for box 2
        dist_limit(int): the maximum threshold (pixel distance) to merge bounding boxes
Returns:
(bool): to indicate whether the bboxes should be merged
"""
    # bbox1: xmin, ymin, xmax, ymax
    # bbox2: xmin, ymin, xmax, ymax
bbox1_xmin, bbox1_ymin, bbox1_xmax, bbox1_ymax = bbox1
bbox2_xmin, bbox2_ymin, bbox2_xmax, bbox2_ymax = bbox2
x_dist = min(abs(bbox2_xmin-bbox1_xmax), abs(bbox2_xmax-bbox1_xmin))
y_dist = min(abs(bbox2_ymin-bbox1_ymax), abs(bbox2_ymax-bbox1_ymin))
#define distance if one object is inside the other
if (bbox2_xmin <= bbox1_xmin) and (bbox2_ymin <= bbox1_ymin) and (bbox2_xmax >= bbox1_xmax) and (bbox2_ymax >= bbox1_ymax):
return(True)
elif (bbox1_xmin <= bbox2_xmin) and (bbox1_ymin <= bbox2_ymin) and (bbox1_xmax >= bbox2_xmax) and (bbox1_ymax >= bbox2_ymax):
return(True)
    #determine if the bboxes are sufficiently close to each other
elif (x_dist <= dist_limit) and (abs(bbox2_ymin-bbox1_ymin) <= dist_limit*3) and (abs(bbox2_ymax-bbox1_ymax) <= dist_limit*3):
return(True)
elif (y_dist <= dist_limit) and (abs(bbox2_xmin-bbox1_xmin) <= dist_limit*3) and (abs(bbox2_xmax-bbox1_xmax) <= dist_limit*3):
return(True)
else:
return(False)
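# Worked example (hypothetical coordinates, dist_limit = 5): bbox1 = [100, 100, 150, 150] and
# bbox2 = [153, 102, 200, 148] are 3 pixels apart horizontally and nearly aligned vertically,
# so calc_sim(bbox1, bbox2, 5) returns True and merge_algo() below would combine them into one box.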
def merge_algo(characteristics, bboxes, dist_limit):
merge_bools = [False] * len(characteristics)
for i, (char1, bbox1) in enumerate(zip(characteristics, bboxes)):
for j, (char2, bbox2) in enumerate(zip(characteristics, bboxes)):
if j <= i:
continue
            # Create a new box if the distance is less than the defined distance limit
merge_bool = calc_sim(bbox1, bbox2, dist_limit)
if merge_bool == True:
# Create a new box
new_box = merge_boxes(bbox1, bbox2)
bboxes[i] = new_box
#delete previous text boxes
del bboxes[j]
# Create a new text string
##chip_name list
if char1[0] != char2[0]: #if the chip_names are not the same
#make chip_names into an array
if type(char1[0]) == str:
chip_names_1 = np.array([char1[0]])
if type(char2[0]) == str:
chip_names_2 = np.array([char2[0]])
chip_names = np.concatenate((chip_names_1, chip_names_2),axis=0)
chip_names = np.unique(chip_names).tolist()
else:
                    chip_names = np.unique(char1[0]).tolist() #if the chip_names are the same
#get object type
                if char1[1] != char2[1]: #if the object classes differ, label the merged object as undefined
                    object_type = 'undefined_object'
                else:
                    object_type = char1[1]
characteristics[i] = [chip_names, object_type, 'Unspecified', '1', '1']
#delete previous text
del characteristics[j]
#return a new boxes and new text string that are close
merge_bools[i] = True
return merge_bools, characteristics, bboxes
def calculate_diameter(bbox, resolution = 0.6):
""" Calculate the diameter of a given bounding bbox for imagery of a given resolution
Arg:
bbox(list): a list of the (xmin, ymin, xmax, ymax) coordinates for box
resolution(float): the (gsd) resolution of the imagery
Returns:
(diameter): the diameter of the bbox of interest
"""
obj_xmin, obj_ymin, obj_xmax, obj_ymax = bbox
obj_width = obj_xmax - obj_xmin
obj_height = obj_ymax - obj_ymin
diameter = min(obj_width, obj_height) * resolution #meter
return(diameter)
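# Worked example (hypothetical bbox): for bbox = (100, 100, 140, 160) the width is 40 px and the height is 60 px,
# so with the default 0.6 m/px resolution calculate_diameter(bbox) returns min(40, 60) * 0.6 = 24.0 m.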
def merge_tile_annotations(tile_characteristics, tiles_xml_dir, tiles_xml_list = None,
distance_limit = 5):
# https://stackoverflow.com/questions/55593506/merge-the-bounding-boxes-near-by-into-one
#specify tiles_xml_list
if tiles_xml_list is None: #if tiles_xml_list not provided, specify the tiles xml list
tiles_xml_list = os.listdir(tiles_xml_dir)
#lists for geosons/geodatabase
tile_names = []
chip_names = []
object_class = []
merged_bbox = []
geometry = []
minx_polygon_pixels = []
miny_polygon_pixels = []
maxx_polygon_pixels = []
maxy_polygon_pixels = []
polygon_vertices_pixels = []
nw_corner_polygon_lat = []
nw_corner_polygon_lon = []
se_corner_polygon_lat = []
se_corner_polygon_lon = []
polygon_vertices_lon_lat = []
utm_projection = []
diameter = []
for tile_xml in tqdm.tqdm(tiles_xml_list): #iterate over tiles
#save bboxes and characteristics
trunc_diff_objs_bboxes = []
trunc_diff_objs_characteristics = []
remaining_objs_bboxes = []
remaining_objs_characteristics = []
#get tilename/tile xml path
tile_name = os.path.splitext(tile_xml)[0]
tile_xml_path = os.path.join(tiles_xml_dir, tile_xml)
#load tile characteristics
tile_characteristics_subset = tile_characteristics[tile_characteristics.loc[:,"tile_name"] == tile_name]
tile_width = tile_characteristics_subset["tile_widths"].values[0]
tile_height = tile_characteristics_subset["tile_heights"].values[0]
tile_utmx_array = np.linspace(tile_characteristics_subset["min_utmx"].values[0],
tile_characteristics_subset["max_utmx"].values[0],
tile_width)
tile_utmy_array = np.linspace(tile_characteristics_subset["min_utmy"].values[0],
tile_characteristics_subset["max_utmy"].values[0],
tile_height)
utm_proj = tile_characteristics_subset["utm_projection"].values[0]
#load each xml
tree = et.parse(tile_xml_path)
root = tree.getroot()
for obj in root.iter('object'):
xmlbox = obj.find('bndbox') #get the bboxes
obj_xmin = xmlbox.find('xmin').text
obj_ymin = xmlbox.find('ymin').text
obj_xmax = xmlbox.find('xmax').text
obj_ymax = xmlbox.find('ymax').text
if int(obj_xmax) > tile_width:
obj_xmax = tile_width
if int(obj_ymax) > tile_height:
obj_ymax = tile_height
if (int(obj.find('difficult').text) == 1) or (int(obj.find('truncated').text) == 1): #get truncated bboxes/characteristics
trunc_diff_objs_bboxes.append([obj_xmin, obj_ymin, obj_xmax, obj_ymax])
trunc_diff_objs_characteristics.append([obj.find('chip_name').text, obj.find('name').text, obj.find('pose').text,
obj.find('truncated').text, obj.find('difficult').text])
else: #get remaining bboxes/characteristics
remaining_objs_bboxes.append([obj_xmin, obj_ymin, obj_xmax, obj_ymax])
remaining_objs_characteristics.append([obj.find('chip_name').text, obj.find('name').text, obj.find('pose').text,
obj.find('truncated').text, obj.find('difficult').text])
# Add merge bboxes
trunc_diff_objs_bboxes = np.array(trunc_diff_objs_bboxes).astype(np.int32)
trunc_diff_objs_bboxes = trunc_diff_objs_bboxes.tolist()
merged_bools, merged_characteristics, merged_bboxes = merge_algo(trunc_diff_objs_characteristics,
trunc_diff_objs_bboxes, distance_limit) #merge
for j, (merged_bool, char, bbox) in enumerate(zip(merged_bools, merged_characteristics, merged_bboxes)):
tile_names.append(tile_name)
chip_names.append(char[0])
object_class.append(char[1])
#state whether bbox were merged
merged_bbox.append(merged_bool)
            #pixel coordinates, 0 indexed
minx = bbox[0] - 1
miny = bbox[1] - 1
maxx = bbox[2] - 1
maxy = bbox[3] - 1
minx_polygon_pixels.append(minx)
miny_polygon_pixels.append(miny)
maxx_polygon_pixels.append(maxx)
maxy_polygon_pixels.append(maxy)
polygon_vertices_pixels.append([(minx,miny), (minx,maxy), (maxx,maxy), (maxx,miny)])
#geospatial data
utm_projection.append(utm_proj)
min_lon, min_lat = transform_point_utm_to_wgs84(utm_proj, tile_utmx_array[minx], tile_utmy_array[miny])
max_lon, max_lat = transform_point_utm_to_wgs84(utm_proj, tile_utmx_array[maxx], tile_utmy_array[maxy])
nw_corner_polygon_lon.append(min_lon)
nw_corner_polygon_lat.append(min_lat)
se_corner_polygon_lon.append(max_lon)
se_corner_polygon_lat.append(max_lat)
polygon_vertices_lon_lat.append([(min_lon,min_lat),(min_lon,max_lat),(max_lon,max_lat),(max_lon,min_lat)])
geometry.append(Polygon([(min_lon,min_lat),(min_lon,max_lat),(max_lon,max_lat),(max_lon,min_lat)]))
#calculate diameter
diameter.append(calculate_diameter(bbox))
#Add remaining bboxes
remaining_objs_bboxes = np.array(remaining_objs_bboxes).astype(np.int32)
remaining_objs_bboxes = remaining_objs_bboxes.tolist()
for j, (char, bbox) in enumerate(zip(remaining_objs_characteristics,remaining_objs_bboxes)):
tile_names.append(tile_name)
chip_names.append(char[0])
object_class.append(char[1])
            #these bboxes were not merged
            merged_bbox.append(False)
            #pixel coordinates
minx_polygon_pixels.append(bbox[0])
miny_polygon_pixels.append(bbox[1])
maxx_polygon_pixels.append(bbox[2])
maxy_polygon_pixels.append(bbox[3])
polygon_vertices_pixels.append([(bbox[0],bbox[1]), (bbox[0],bbox[3]), (bbox[2],bbox[3]), (bbox[2],bbox[1])])
#geospatial data
utm_projection.append(utm_proj)
min_lon, min_lat = transform_point_utm_to_wgs84(utm_proj, tile_utmx_array[bbox[0]-1], tile_utmy_array[bbox[1]-1])
max_lon, max_lat = transform_point_utm_to_wgs84(utm_proj, tile_utmx_array[bbox[2]-1], tile_utmy_array[bbox[3]-1])
nw_corner_polygon_lon.append(min_lon)
nw_corner_polygon_lat.append(min_lat)
se_corner_polygon_lon.append(max_lon)
se_corner_polygon_lat.append(max_lat)
polygon_vertices_lon_lat.append([(min_lon,min_lat), (min_lon,max_lat), (max_lon,max_lat), (max_lon,min_lat)])
geometry.append(Polygon([(min_lon,min_lat), (min_lon,max_lat), (max_lon,max_lat), (max_lon,min_lat)]))
#calculate diameter
diameter.append(calculate_diameter(bbox))
#create geodatabase
gdf = gpd.GeoDataFrame({'tile_name': tile_names,'chip_name': chip_names,
"minx_polygon_pixels": minx_polygon_pixels, "miny_polygon_pixels": miny_polygon_pixels, #min lon/lat
"maxx_polygon_pixels": maxx_polygon_pixels, "maxy_polygon_pixels": maxy_polygon_pixels, #max lat
"polygon_vertices_pixels": polygon_vertices_pixels, "utm_projection": utm_projection,
"nw_corner_polygon_lat": nw_corner_polygon_lat, "nw_corner_polygon_lon": nw_corner_polygon_lon,#min lon/lat
"se_corner_polygon_lat": se_corner_polygon_lat, "se_corner_polygon_lon": se_corner_polygon_lon, #min lon/lat
"polygon_vertices_lon_lat": polygon_vertices_lon_lat,'geometry': geometry,
"object_class": object_class, 'diameter (m)': diameter, 'merged_bbox': merged_bbox})
return(gdf)
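# Minimal usage sketch (hypothetical paths, not from the source): build the tile-level annotation GeoDataFrame
# from the tile characteristics table and the folder of tile-level xmls produced by generate_tile_xmls().
#   tile_characteristics = pd.read_csv("tile_characteristics.csv")
#   gdf = merge_tile_annotations(tile_characteristics, "tiles_xml", distance_limit = 5)
#   #the result can then be written out, e.g. with write_gdf() defined in form_calcs.py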
######################################################################################################################################################
###################################### Inundation Values for Tile Database ##################################################
######################################################################################################################################################
def getFeatures(gdf):
"""Function to parse features from GeoDataFrame in such a manner that rasterio wants them"""
return [json.loads(gdf.to_json())['features'][0]['geometry']]
def identify_inundation_for_tanks(gdf, sc_slosh_inundation_map_path):
#identify inundation bounds
category = []
geometry = []
for i in range(1,6): #get the bounding box polygons
sc_slosh_inundation_map_name = "SC_Category" + str(i) + "_MOM_Inundation_HighTide_EPSG4326.tif"
sc_slosh_inundation_map = rasterio.open(os.path.join(sc_slosh_inundation_map_path, sc_slosh_inundation_map_name))
min_lon, min_lat, max_lon, max_lat = sc_slosh_inundation_map.bounds
category.append("SC_Category" + str(i))
geometry.append(Polygon([(min_lon,min_lat),(min_lon,max_lat),(max_lon,max_lat),(max_lon,min_lat)]))
#make dataframe of inundation map bounds
d = {'category': category,'geometry': geometry}
sc_slosh = gpd.GeoDataFrame(d)
    #identify useful bounding box
if sc_slosh["geometry"].nunique() == 1: #all of the bounding boxes for the inundation maps are the same
sc_inundation_poly = sc_slosh["geometry"].unique()[0]
#create dictionary for inundation values for each tank
inundation_level_for_tank = {}
for i in range(1,6):
inundation_level_for_tank["Category" + str(i)] = np.zeros((len(gdf)))
#make a list to record whether the inundation level has been recorded
bbox_within_inundation_bounds = [False] * len(gdf)
#get inundation values
for index, poly in enumerate(gdf["geometry"]): #iterate over the polygons
if sc_inundation_poly.contains(poly): #identify whether the bbox is inside of the inundation map
bbox_within_inundation_bounds[index] = True #record that the bbox is within the inundation map
#make a geodatabaframe for each polygon that is
geo = gpd.GeoDataFrame({'geometry': poly}, index=[0], crs="EPSG:4326")
coords = getFeatures(geo)
for i in range(1,6): #get the bounding box polygons
sc_slosh_inundation_map_name = "SC_Category" + str(i) + "_MOM_Inundation_HighTide_EPSG4326.tif"
sc_slosh_inundation_map = rasterio.open(os.path.join(sc_slosh_inundation_map_path, sc_slosh_inundation_map_name))
out_img, out_transform = rasterio.mask.mask(dataset=sc_slosh_inundation_map, shapes=coords, crop=True)
if np.all(out_img == 255): #check if all inundation values are equal the no value entry (255)
inundation_level_for_tank["Category" + str(i)][index] = 0
else:
out_img = np.where(out_img >= 255, 0, out_img)
inundation_level_for_tank["Category" + str(i)][index] = np.average(out_img)
#add inundation values to tank database
gdf["bbox_within_inundation_bounds"] = bbox_within_inundation_bounds
for i in range(1,6):
gdf["Category" + str(i)] = inundation_level_for_tank["Category" + str(i)]
return(gdf)
######################################################################################################################################################
###################################### State Names for Tile Database ##################################################
######################################################################################################################################################
def identify_state_name_for_each_state(states_gpds_path, gdf):
#https://gis.stackexchange.com/questions/251812/returning-percentage-of-area-of-polygon-intersecting-another-polygon-using-shape
states_gpds = gpd.read_file(states_gpds_path)
states_gds_epsg4326 = states_gpds.to_crs(epsg=4326) #reproject to lat lon
#get state for each polygon
state_list = [None] * len(gdf)
for tank_index, tank_poly in tqdm.tqdm(enumerate(gdf["geometry"])): #iterate over the tank polygons
for state_index, state_poly in enumerate(states_gds_epsg4326["geometry"]): #iterate over the state polygons
if state_poly.intersects(tank_poly) or state_poly.contains(tank_poly): #identify whether the tank bbox is inside of the state polygon
if state_list[tank_index] == None:
state_list[tank_index] = states_gds_epsg4326.iloc[state_index]["NAME"] #add state name for each tank to list
else:
index, = np.where(states_gds_epsg4326["NAME"] == state_list[tank_index]) #check percent of tank that intersects with current state
prev_state_poly = states_gds_epsg4326["geometry"][index[0]]
prev_state_poly_intersection_area = tank_poly.intersection(prev_state_poly).area/tank_poly.area #check percent of tank that intersects with prev_state_poly
proposed_state_poly_intersection_area = tank_poly.intersection(state_poly).area/tank_poly.area #check percent of tank that intersects with proposed state
if proposed_state_poly_intersection_area > prev_state_poly_intersection_area: #change the state if the polygon mainly resides in a different state
state_list[tank_index] = states_gds_epsg4326.iloc[state_index]["NAME"]
#add states to dataframe
state_list = np.array(state_list)
gdf["state"] = state_list
return(gdf) | AST-data-eng | /AST_data_eng-0.0.5.tar.gz/AST_data_eng-0.0.5/AST_data_eng/merge.py | merge.py |
def reclassify_narrow_closed_roof_and_closed_roof_tanks(xml_path):
""" Reclassify Narrow Closed Roof and Closed Roof Tanks
"""
#load each xml
class_ob = []
tree = et.parse(xml_path)
root = tree.getroot()
for obj in root.iter('object'):
name = obj.find("name").text
xmlbox = obj.find('bndbox') #get the bboxes
obj_xmin = xmlbox.find('xmin').text
obj_ymin = xmlbox.find('ymin').text
obj_xmax = xmlbox.find('xmax').text
obj_ymax = xmlbox.find('ymax').text
width = int(obj_xmax) - int(obj_xmin)
height = int(obj_ymax) - int(obj_ymin)
if (int(obj.find('difficult').text) == 0) and (int(obj.find('truncated').text) == 0):
            #if a closed roof tank is less than or equal to the narrow closed roof tank threshold then reclassify as narrow closed roof tank
            if (name == "closed_roof_tank") and (width <= 15) and (height <= 15):
                obj.find("name").text = "narrow_closed_roof_tank" #write the new class back to the xml element
            #if a narrow closed roof tank is greater than the closed roof tank threshold then reclassify as closed roof tank
            if (name == "narrow_closed_roof_tank") and (width > 15) and (height > 15):
                obj.find("name").text = "closed_roof_tank" #write the new class back to the xml element
    tree.write(xml_path)
def correct_inconsistent_labels_xml(xml_dir):
#Create a list of the possible names that each category may take
correctly_formatted_object = ["closed_roof_tank","narrow_closed_roof_tank",
"external_floating_roof_tank","sedimentation_tank",
"water_tower","undefined_object","spherical_tank"]
object_dict = {"closed_roof_tank": "closed_roof_tank",
"closed_roof_tank ": "closed_roof_tank",
"closed roof tank": "closed_roof_tank",
"narrow_closed_roof_tank": "narrow_closed_roof_tank",
"external_floating_roof_tank": "external_floating_roof_tank",
"external floating roof tank": "external_floating_roof_tank",
'external_floating_roof_tank ': "external_floating_roof_tank",
'external_closed_roof_tank': "external_floating_roof_tank",
"water_treatment_tank": "sedimentation_tank",
'water_treatment_tank ': "sedimentation_tank",
"water_treatment_plant": "sedimentation_tank",
"water_treatment_facility": "sedimentation_tank",
"water_tower": "water_tower",
"water_tower ": "water_tower",
'water_towe': "water_tower",
"spherical_tank":"spherical_tank",
'sphere':"spherical_tank",
'spherical tank':"spherical_tank",
"undefined_object": "undefined_object",
"silo": "undefined_object" }
#"enumerate each image" This chunk is actually just getting the paths for the images and annotations
for xml_file in os.listdir(xml_dir):
# use the parse() function to load and parse an XML file
tree = et.parse(os.path.join(xml_dir, xml_file))
root = tree.getroot()
for obj in root.iter('object'):
for name in obj.findall('name'):
if name.text not in correctly_formatted_object:
name.text = object_dict[name.text]
if int(obj.find('difficult').text) == 1:
obj.find('truncated').text = '1'
obj.find('difficult').text = '1'
if int(obj.find('truncated').text) == 1:
obj.find('truncated').text = '1'
obj.find('difficult').text = '1'
tree.write(os.path.join(xml_dir, xml_file)) | AST-data-eng | /AST_data_eng-0.0.5.tar.gz/AST_data_eng-0.0.5/AST_data_eng/correct_labels.py | correct_labels.py |
# Standard packages
#from __future__ import print_function
import warnings
import urllib
import shutil
import os
import math
import json
import tqdm
from glob import glob
import xml.dom.minidom
from xml.dom.minidom import parseString
import xml.etree.ElementTree as et
from xml.dom import minidom
#install standard
import numpy as np
import pandas as pd
import cv2
import matplotlib
import matplotlib.pyplot as plt
import fiona #must be import before geopandas
import geopandas as gpd
import rasterio
import rioxarray
import re #pip install regex
import rtree
import pyproj
import shapely
from shapely.ops import transform
from shapely.geometry import Polygon, Point, MultiPoint, MultiPolygon, MultiLineString
from skimage.metrics import structural_similarity as compare_ssim
#import imutils
#import psutil
#Parsing/Modifying XML
from lxml.etree import Element,SubElement,tostring
import data_eng.az_proc as ap
## Write files
def write_list(list_, file_path):
"""
Write data from a list
Args:
list_ (str): data stored in a list
file_path (str): a file path to store the list
"""
print("Started writing list data into a json file")
with open(file_path, "w") as fp:
json.dump(list_, fp)
print("Done writing JSON data into .json file")
# Read list to memory
def read_list(file_path):
"""
Read data writen to a list
Args:
file_path (str): a file path to store the list
Returns:
list_ (str): data stored in a list
"""
# for reading also binary mode is important
with open(file_path, 'rb') as fp:
list_ = json.load(fp)
return list_
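# Minimal usage sketch (hypothetical file names): round-trip a list of tile names through a JSON file.
#   write_list(["tile_a.tif", "tile_b.tif"], "tile_names.json")
#   tile_names = read_list("tile_names.json")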
######################################################################################################################################################
###################################### Write Tile Level Annotations (shift to write gdf) ##################################################
######################################################################################################################################################
def write_gdf(gdf, output_filepath, output_filename = 'tile_level_annotations'):
gdf.crs = "EPSG:4326" #assign projection
#save geodatabase as json
with open(os.path.join(output_filepath, output_filename+".json"), 'w') as file:
file.write(gdf.to_json())
##save geodatabase as geojson
with open(os.path.join(output_filepath, output_filename+".geojson"), "w") as file:
file.write(gdf.to_json())
    ##save geodatabase as shapefile (specify columns to drop as an arg)
gdf_shapefile = gdf.drop(columns=["chip_name","polygon_vertices_pixels","polygon_vertices_lon_lat"])
gdf_shapefile.to_file(os.path.join(output_filepath,output_filename+".shp"))
####################################################### get png images ###################################################
########################################################################################
def png4jpg(image_dir, new_image_dir, tiles_dir, item_dim = int(512)):
""" Get the png for labeled images
Load tile of interest; Identify labeled images, and save labeled images as png
Args:
new_image_dir(str): path to folder that will contain the png images
image_dir(str): path to folder contain labeled images
tiles_dir(str): path to folder containing tiles
"""
#get list of images
remove_thumbs(image_dir) #remove thumbs db first
images = os.listdir(image_dir)
#get list of tile names and list of image names (without extensions
image_names = []
tile_names = []
for image in images: #iterate over annotated images
image_names.append(os.path.splitext(image)[0]) #remove extension
        tile_names.append(image.rsplit("_",2)[0]) #remove the row/col suffix to get the tile name
tile_names = np.unique(tile_names)
for tile_name in tile_names: #iterate over and load tiles
images_in_tile = [string for string in image_names if tile_name in string]
tile = cv2.imread(os.path.join(tiles_dir, tile_name + ".tif"), cv2.IMREAD_UNCHANGED) #read in tile
tile_height, tile_width, tile_channels = tile.shape #the size of the tile
row_index = math.ceil(tile_height/item_dim)
col_index = math.ceil(tile_width/item_dim)
for image_name in images_in_tile: #iterate over images associated with each tile
y, x = image_name.split("_")[-2:] #y=row;x=col
y = int(y)
x = int(x)
#save image
img = tile_to_chip_array(tile, x, y, item_dim) #subset the chip from the tile
image_name_ext = image_name + '.png' # row_cols #specify the chip names
image_path = os.path.join(new_image_dir, image_name_ext)
if not os.path.exists(image_path):
cv2.imwrite(image_path, img) #save images | AST-data-eng | /AST_data_eng-0.0.5.tar.gz/AST_data_eng-0.0.5/AST_data_eng/form_calcs.py | form_calcs.py |
ASTFormatter
============
The ASTFormatter class accepts an AST tree and returns a valid source code representation of that tree.
Example Usage
-------------
::
from astformatter import ASTFormatter
import ast
tree = ast.parse(open('modulefile.py'), 'modulefile.py', mode='exec')
src = ASTFormatter().format(tree, mode='exec')
Bugs
----
- Currently, indentation is fixed at 4 spaces.
- Too many methods are exposed that shouldn't be, in order to properly subclass `ast.NodeVisitor`.
- Need to make the statement visitor methods consistent about returning a list of strings; most still just return a string.
- Code modified to work with 3.x needs cleanup
Latest Changes
--------------
`0.6.4 <'https://pypi.python.org/pypi?:action=display&name=ASTFormatter&version=0.6.4'>`_ (2017-06-25)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Process docstring if exists in Module, ClassDef, and FunctionDef
nodes.
`#9 <https://github.com/darkfoxprime/python-astformatter/pull/9>`_
(`darkfoxprime <https://github.com/darkfoxprime>`_)
- Add parens around unary operands if necessary
`#8 <https://github.com/darkfoxprime/python-astformatter/pull/8>`_
(`zerebubuth <https://github.com/zerebubuth>`_)
Copyright
---------
Copyright |copy| 2015-2016 by Johnson Earls. Some rights reserved. See the license_ for details.
.. _license: https://raw.githubusercontent.com/darkfoxprime/python-astformatter/master/LICENSE
.. |copy| unicode:: 0xA9 .. copyright sign
| ASTFormatter | /ASTFormatter-0.6.4.tar.gz/ASTFormatter-0.6.4/README.rst | README.rst |
import ast
import re
__all__ = ('ASTFormatter',)
import sys
# for sys.version
########################################################################
# The ASTFormatter class walks an AST and produces properly formatted
# python code for that AST.
class ASTFormatter(ast.NodeVisitor):
'''
ASTFormatter
============
The ASTFormatter class accepts an AST tree and returns a valid source code representation of that tree.
Example Usage
-------------
::
from astformatter import ASTFormatter
import ast
tree = ast.parse(open('modulefile.py'), 'modulefile.py', mode='exec')
src = ASTFormatter().format(tree, mode='exec')
Bugs
----
- Currently, indentation is fixed at 4 spaces.
- Too many methods are exposed that shouldn't be, in order to properly subclass `ast.NodeVisitor`.
- Need to make the statement visitor methods consistent about returning a list of strings; most still just return a string.
- Code modified to work with 3.x needs cleanup
Latest Changes
--------------
`0.6.4 <'https://pypi.python.org/pypi?:action=display&name=ASTFormatter&version=0.6.4'>`_ (2017-06-25)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    - Process docstring if exists in Module, ClassDef, and FunctionDef
nodes.
`#9 <https://github.com/darkfoxprime/python-astformatter/pull/9>`_
(`darkfoxprime <https://github.com/darkfoxprime>`_)
- Add parens around unary operands if necessary
`#8 <https://github.com/darkfoxprime/python-astformatter/pull/8>`_
(`zerebubuth <https://github.com/zerebubuth>`_)
Copyright
---------
Copyright |copy| 2015-2016 by Johnson Earls. Some rights reserved. See the license_ for details.
.. _license: https://raw.githubusercontent.com/darkfoxprime/python-astformatter/master/LICENSE
.. |copy| unicode:: 0xA9 .. copyright sign
'''
__version__ = '0.6.4'
def __init__(self):
"""Return a new ASTFormatter object."""
# initialize the context to empty; every call to format()
# will introduce a new context for that call, and every
# node visited will have that node pushed to the top of the
# stack and popped after the visitor returns.
self.context = []
def format(self, AST, mode='exec'):
"""Accept an AST tree and return a properly formatted Python
expression or code block that compiles into that AST tree.
If mode is 'exec', treat the tree as if it were rooted at a
module; otherwise, for 'eval', treat it as if it were rooted
at an expr node.
"""
if not isinstance(AST, ast.AST):
raise TypeError("ASTFormatter.format() expected AST got " + type(AST).__name__)
if mode == 'exec':
self.context.insert(0, ast.Module)
elif mode == 'eval':
self.context.insert(0, ast.expr)
else:
raise ValueError("ASTFormatter.format() expected either 'eval' or 'exec' for mode, got " + repr(mode))
formatted = "".join(self.visit(AST))
self.context.pop(0)
return formatted
####################################################################
# helper methods
def visit(self, node):
"""Return the representation of the python source for `node`.
If `node` is an expression node, return a single string;
otherwise, return either a single newline-terminated string
or a list of newline-terminated strings.
FIXME: Only return lists of strings from non-expression nodes.
"""
self.context.insert(0, node.__class__)
retval = super(ASTFormatter, self).visit(node)
self.context.pop(0)
return retval
def __process_body(self, stmtlist, indent=""):
"""Process a body block consisting of a list of statements
by visiting all the statements in the list, prepending an
optional indent to each statement, and returning the indented
block.
"""
self.indent = len(indent)
content = []
for stmt in stmtlist:
stmts = self.visit(stmt)
if not isinstance(stmts, list):
stmts = [stmts]
content += ["%s%s" % (indent, stmt) for stmt in stmts]
return content
def generic_visit(self, node):
assert False, "ASTFormatter found an unknown node type " + type(node).__name__
####################################################################
# precedence of expression operators/nodes.
# each precedence is an integer, with higher values for
# higher precedence operators.
# the __precedence_list is a list of tuples of node types, in order
# lowest priority to highest. It is used to build the _precedence map.
__precedence_list = (
(ast.Lambda,),
(ast.IfExp,),
(ast.Or,),
(ast.And,),
(ast.Not,),
(ast.In, ast.NotIn, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.Gt, ast.GtE, ast.NotEq, ast.Eq, ast.Compare,),
(ast.BitOr,),
(ast.BitXor,),
(ast.BitAnd,),
(ast.LShift, ast.RShift,),
(ast.Add, ast.Sub,),
(ast.Mult, ast.Div, ast.Mod, ast.FloorDiv,),
(ast.UAdd, ast.USub, ast.Invert,),
(ast.Pow,),
(ast.Subscript, ast.Slice, ast.Call, ast.Attribute,),
(ast.Tuple, ast.List, ast.Dict,) + (((sys.version_info[0] < 3) and (ast.Repr,)) or ()) ,
)
# _precedence maps node types to a precedence number; higher values
# mean higher precedence. For example, ast.Mult and ast.Div will
# have higher precedence values thatn ast.Add and ast.Sub.
_precedence = {}
for __precedence_value in range(len(__precedence_list)):
for __token in __precedence_list[__precedence_value]:
_precedence[__token] = __precedence_value
# the __parens method accepts an operand and the operator which is
# operating on the operand. if the operand's type has a lower
# precedence than the operator's type, the operand's formatted value
# will be returned in (parentheses); otherwise, the operand's value
# will be returned as is. If operand is a BinOp or BoolOp node, the
# comparison is instead made against he operator encapsulted by the
# BinOp or BoolOp node.
def __parens(self, operand, operator):
operand_str = self.visit(operand)
if isinstance(operand, ast.BinOp):
operand = operand.op
elif isinstance(operand, ast.BoolOp):
operand = operand.op
operand = type(operand)
operator = type(operator)
if operand in self._precedence and operator in self._precedence:
if self._precedence[operand] < self._precedence[operator]:
operand_str = "(%s)" % (operand_str,)
return operand_str
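    # Quick illustration of the parenthesization above: formatting ast.parse("(a + b) * c", mode="eval")
    # emits "(a + b) * c", because the Add operand has lower precedence than the enclosing Mult and is
    # wrapped by __parens, while "a * b + c" is emitted without any extra parentheses.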
####################################################################
# expression methods - these return a single string with no newline
def visit_Add(self, node):
return "+"
def visit_alias(self, node):
if getattr(node, 'asname', None) is None:
return node.name
else:
return "%s as %s" % (node.name, node.asname)
def visit_And(self, node):
return "and"
def visit_arg(self, node):
if getattr(node, 'annotation', None):
return "%s: %s" % (node.arg, self.visit(node.annotation))
return node.arg
def visit_arguments(self, node):
args = [self.visit(arg) for arg in node.args[:len(node.args) - len(node.defaults)]]
defargs = ["%s=%s" % (self.visit(arg), self.visit(default)) for (arg, default) in zip(node.args[-len(node.defaults):], node.defaults)]
if getattr(node, 'vararg', None):
vararg = ["*" + self.visit(node.vararg)]
elif getattr(node, 'kwonlyargs', None):
vararg = ["*"]
else:
vararg = []
if getattr(node, 'kwonlyargs', None):
kwonlyargs = [self.visit(arg) for arg in node.kwonlyargs[:len(node.kwonlyargs) - len(node.kw_defaults)]]
            kwdefs = ["%s=%s" % (self.visit(arg), self.visit(default)) for (arg, default) in zip(node.kwonlyargs[-len(node.kw_defaults):], node.kw_defaults)]
else:
kwonlyargs = []
kwdefs = []
if getattr(node, 'kwarg', None):
kwarg = ["**" + self.visit(node.kwarg)]
else:
kwarg = []
return "(%s)" % (",".join(args + defargs + vararg + kwonlyargs + kwdefs + kwarg),)
def visit_Attribute(self, node):
return "%s.%s" % (self.__parens(node.value, node), node.attr)
def visit_BinOp(self, node):
return (" %s " % (self.visit(node.op),)).join([self.__parens(operand, node.op) for operand in (node.left, node.right)])
def visit_BitAnd(self, node):
return "&"
def visit_BitOr(self, node):
return "|"
def visit_BitXor(self, node):
return "^"
def visit_BoolOp(self, node):
return (" %s " % (self.visit(node.op),)).join([self.__parens(operand, node.op) for operand in node.values])
def visit_Bytes(self, node):
return repr(node.s)
def visit_Call(self, node):
args = [self.visit(arg) for arg in node.args]
keywords = [self.visit(keyword) for keyword in node.keywords]
if getattr(node, 'starargs', None):
starargs = ["*%s" % (self.visit(node.starargs),)]
else:
starargs = []
if getattr(node, 'kwargs', None):
kwargs = ["**%s" % (self.visit(node.kwargs),)]
else:
kwargs = []
return "%s(%s)" % (self.visit(node.func), ", ".join(args + keywords + starargs + kwargs))
def visit_Compare(self, node):
return "%s %s" % (self.visit(node.left), " ".join(["%s %s" % (self.visit(op), self.visit(right)) for (op, right) in zip(node.ops, node.comparators)]))
def visit_comprehension(self, node):
ifs = "".join([" if %s" % (self.visit(ifpart),) for ifpart in node.ifs])
return "for %s in %s%s" % (self.visit(node.target), self.visit(node.iter), ifs)
def visit_Dict(self, node):
return "{%s}" % (", ".join(["%s:%s" % (self.visit(key), self.visit(value)) for (key, value) in zip(node.keys, node.values)]),)
def visit_DictComp(self, node):
if getattr(node, 'generators', None):
return "{%s:%s %s}" % (self.visit(node.key), self.visit(node.value)," ".join(self.visit(generator) for generator in node.generators),)
return "{%s:%s}" % (self.visit(node.key), self.visit(node.value))
def visit_Div(self, node):
return "/"
re_docstr_escape = re.compile(r'([\\"])')
re_docstr_remove_blank_front = re.compile(r'^[ \n]*')
re_docstr_remove_blank_back = re.compile(r'[ \n]*$')
re_docstr_indent = re.compile(r'^( *).*')
def visit_DocStr(self, node):
"""an artificial visitor method, called by visit_Expr if its value is a string."""
docstring = self.re_docstr_remove_blank_front.sub('',
self.re_docstr_remove_blank_back.sub('',
self.re_docstr_escape.sub(r'\\\1', node.s))).split('\n')
if len(docstring) > 1:
docstr_indents = [
len(self.re_docstr_indent.sub(r'\1', ds)) for ds in [
ds.rstrip() for ds in docstring[1:]
] if ds
]
docstr_indent = min(docstr_indents)
docstring = ['"""%s\n' % (docstring[0],)] + ["%s\n" % (ds[docstr_indent:],) for ds in docstring[1:]] + ['"""\n']
else:
docstring = ['"""%s"""\n' % (docstring[0],)]
return docstring
def visit_Ellipsis(self, node):
return "..."
def visit_Eq(self, node):
return "=="
def visit_ExtSlice(self, node):
return ", ".join([self.visit(dim) for dim in node.dims])
def visit_FloorDiv(self, node):
return "//"
def visit_GeneratorExp(self, node):
if getattr(node, 'generators', None):
return "(%s %s)" % (self.visit(node.elt), " ".join(self.visit(generator) for generator in node.generators),)
return "(%s)" % (self.visit(node.elt),)
def visit_Gt(self, node):
return ">"
def visit_GtE(self, node):
return ">="
def visit_IfExp(self, node):
return "%s if %s else %s" % (self.visit(node.body), self.visit(node.test), self.visit(node.orelse))
def visit_In(self, node):
return "in"
def visit_Index(self, node):
return self.visit(node.value)
def visit_Invert(self, node):
return "~"
def visit_Is(self, node):
return "is"
def visit_IsNot(self, node):
return "is not"
def visit_keyword(self, node):
if getattr(node, 'arg', None):
return "%s=%s" % (node.arg, self.visit(node.value))
else:
return "**%s" % (self.visit(node.value),)
def visit_Lambda(self, node):
return "lambda %s: %s" % (self.visit(node.args)[1:-1], self.visit(node.body))
def visit_List(self, node):
return "[%s]" % (", ".join([self.visit(elt) for elt in node.elts]),)
def visit_ListComp(self, node):
if getattr(node, 'generators', None):
return "[%s %s]" % (self.visit(node.elt), " ".join(self.visit(generator) for generator in node.generators),)
return "[%s]" % (self.visit(node.elt),)
def visit_Lt(self, node):
return "<"
def visit_LtE(self, node):
return "<="
def visit_LShift(self, node):
return "<<"
def visit_Mod(self, node):
return "%"
def visit_Mult(self, node):
return "*"
def visit_Name(self, node):
return node.id
def visit_NameConstant(self, node):
return repr(node.value)
def visit_Not(self, node):
return "not"
def visit_NotEq(self, node):
return "!="
def visit_NotIn(self, node):
return "not in"
def visit_Num(self, node):
return repr(node.n)
def visit_Or(self, node):
return "or"
def visit_Pow(self, node):
return "**"
def visit_Repr(self, node):
return "`%s`" % (self.visit(node.value),)
def visit_RShift(self, node):
return ">>"
def visit_Set(self, node):
return "{%s}" % (", ".join(["%s" % (self.visit(elt),) for elt in node.elts]),)
def visit_SetComp(self, node):
if getattr(node, 'generators', None):
return "{%s %s}" % (self.visit(node.elt), " ".join(self.visit(generator) for generator in node.generators),)
return "{%s}" % (self.visit(node.elt),)
def visit_Slice(self, node):
if getattr(node, 'lower', None):
lower = self.visit(node.lower)
else:
lower = ""
if getattr(node, 'upper', None):
upper = self.visit(node.upper)
else:
upper = ""
if getattr(node, 'step', None):
return ":".join([lower, upper, self.visit(node.step)])
else:
return ":".join([lower, upper])
def visit_Starred(self, node):
return "*" + self.visit(node.value)
def visit_Str(self, node):
return repr(node.s)
def visit_Sub(self, node):
return "-"
def visit_Subscript(self, node):
return "%s[%s]" % (self.visit(node.value), self.visit(node.slice))
def visit_Tuple(self, node):
if len(node.elts) == 1:
return "(%s,)" % (self.visit(node.elts[0]),)
return "(%s)" % (", ".join([self.visit(elt) for elt in node.elts]),)
def visit_UAdd(self, node):
return "+"
def visit_USub(self, node):
return "-"
def visit_UnaryOp(self, node):
return "%s %s" % (self.visit(node.op), self.__parens(node.operand, node.op))
def visit_withitem(self, node):
if getattr(node, 'optional_vars', None) is None:
return self.visit(node.context_expr)
else:
return "%s as %s" % (self.visit(node.context_expr), self.visit(node.optional_vars),)
def visit_Yield(self, node):
if getattr(node, 'value', None):
return "yield %s" % (self.visit(node.value),)
return "yield"
def visit_YieldFrom(self, node):
return "yield from %s" % (self.visit(node.value),)
####################################################################
# statement methods - these return either a single string or a list
# of strings, all terminated with a `\n` newline.
def visit_Assert(self, node):
if getattr(node, 'msg', None) is None:
msg = ""
else:
msg = "," + self.visit(node.msg)
return "assert %s%s\n" % (self.visit(node.test), msg)
def visit_Assign(self, node):
return "%s = %s\n" % (",".join([self.visit(target) for target in node.targets]), self.visit(node.value))
def visit_AugAssign(self, node):
return "%s %s= %s\n" % (self.visit(node.target), self.visit(node.op), self.visit(node.value))
def visit_Break(self, node):
return "break\n"
def visit_ClassDef(self, node):
decorators = [self.visit(dec) for dec in node.decorator_list]
supers = []
if getattr(node, 'bases', None) is not None:
supers.extend([self.visit(base) for base in node.bases])
if getattr(node, 'keywords', None) is not None:
supers.extend([self.visit(kw) for kw in node.keywords])
if getattr(node, 'starargs', None) is not None:
supers.append("*" + self.visit(node.starargs))
if getattr(node, 'kwargs', None) is not None:
supers.append("**" + self.visit(node.kwargs))
if len(supers):
supers = "(%s)" % (", ".join(supers))
else:
supers = ""
classdef = ["class %s%s:\n" % (node.name, supers)]
try:
if node.docstring is not None:
body = [ast.Expr(ast.Str(node.docstring))] + node.body
else:
body = node.body
except AttributeError:
body = node.body
classbody = self.__process_body(body, " ")
return decorators + classdef + classbody
def visit_Continue(self, node):
return "continue\n"
def visit_Delete(self, node):
return "del %s\n" % (",".join([self.visit(target) for target in node.targets]),)
if sys.version_info[0] == 2:
def visit_ExceptHandler(self, node):
if not node.type:
return ["except:\n"] + self.__process_body(node.body, " ")
if getattr(node, 'name', None):
return ["except %s,%s:\n" % (self.visit(node.type), self.visit(node.name))] + self.__process_body(node.body, " ")
return ["except %s:\n" % (self.visit(node.type),)] + self.__process_body(node.body, " ")
else:
def visit_ExceptHandler(self, node):
if not node.type:
return ["except:\n"] + self.__process_body(node.body, " ")
if getattr(node, 'name', None):
return ["except %s as %s:\n" % (self.visit(node.type), node.name)] + self.__process_body(node.body, " ")
return ["except %s:\n" % (self.visit(node.type),)] + self.__process_body(node.body, " ")
def visit_Exec(self, node):
inglobals, inlocals = "", ""
if getattr(node, 'globals', None) is not None:
inglobals = " in %s" % (self.visit(node.globals),)
if getattr(node, 'locals', None) is not None:
inlocals = ", %s" % (self.visit(node.locals),)
return "exec %s%s%s\n" % (self.visit(node.body), inglobals, inlocals)
def visit_Expr(self, node):
if isinstance(node.value, ast.Str):
return self.visit_DocStr(node.value)
return [ self.visit(node.value) + '\n' ]
def visit_Expression(self, node):
return self.visit(node.body)
def visit_For(self, node):
if getattr(node, 'orelse', None) is None or len(node.orelse) == 0:
orelse = []
else:
orelse = ["else:\n"] + self.__process_body(node.orelse, " ")
return [
"for %s in %s:\n" % (
self.visit(node.target),
self.visit(node.iter),
)
] + self.__process_body(node.body, " ") + orelse
def visit_FunctionDef(self, node):
decorators = [self.visit(dec) for dec in node.decorator_list]
funcdef = ["def %s%s:\n" % (node.name, self.visit(node.args))]
try:
if node.docstring is not None:
body = [ast.Expr(ast.Str(node.docstring))] + node.body
else:
body = node.body
except AttributeError:
body = node.body
funcbody = self.__process_body(body, " ")
return decorators + funcdef + funcbody
def visit_Global(self, node):
return "global %s\n" % (",".join(node.names),)
def visit_If(self, node):
content = ["if %s:\n" % (self.visit(node.test),)] + self.__process_body(node.body, " ")
if getattr(node, 'orelse', None) is not None and len(node.orelse) > 0:
if isinstance(node.orelse[0], ast.If):
orelse = self.__process_body(node.orelse, "")
orelse[0] = "el" + orelse[0]
else:
orelse = ["else:\n"] + self.__process_body(node.orelse, " ")
content.extend(orelse)
return content
def visit_Import(self, node):
return [ "import %s\n" % (self.visit(name),) for name in node.names ]
def visit_ImportFrom(self, node):
return "from %s%s import %s\n" % ("." * node.level, node.module, ", ".join([self.visit(name) for name in node.names]),)
def visit_Module(self, node):
try:
if node.docstring is not None:
body = [ast.Expr(ast.Str(node.docstring))] + node.body
else:
body = node.body
except AttributeError:
body = node.body
return self.__process_body(body)
def visit_Nonlocal(self, node):
return "nonlocal %s\n" % (",".join(node.names),)
def visit_Pass(self, node):
return "pass\n"
def visit_Print(self, node):
if getattr(node, 'dest', None) is None:
dest = ""
else:
dest = ">> %s, " % (self.visit(node.dest),)
if getattr(node, 'nl', None):
nl = ""
else:
nl = ","
return "print %s%s%s\n" % (dest, ", ".join([self.visit(value) for value in node.values]), nl)
def visit_Raise(self, node):
        if getattr(node, 'cause', None) is not None:
            return "raise %s from %s\n" % (self.visit(node.exc), self.visit(node.cause))
elif getattr(node, 'exc', None) is not None:
return "raise %s\n" % (self.visit(node.exc),)
elif getattr(node, 'tback', None) is not None:
params = (node.type, node.inst, node.tback)
elif getattr(node, 'inst', None) is not None:
params = (node.type, node.inst)
elif getattr(node, 'type', None) is not None:
params = (node.type,)
else:
params = ""
if len(params):
params = " " + ",".join([self.visit(param) for param in params])
return "raise%s\n" % (params,)
def visit_Return(self, node):
if getattr(node, 'value', None) is not None:
return "return %s\n" % (self.visit(node.value),)
return "return\n"
def visit_Try(self, node):
retval = ["try:\n"] + self.__process_body(node.body, " ")
handlers = getattr(node, 'handlers', None)
if handlers is not None and len(handlers) > 0:
for handler in handlers:
retval.extend(self.visit(handler))
orelse = getattr(node, 'orelse', None)
if orelse is not None and len(orelse) > 0:
retval.extend(["else:\n"] + self.__process_body(orelse, " "))
final = getattr(node, 'finalbody', None)
if final is not None:
retval.extend( ["finally:\n"] + self.__process_body(node.finalbody, " ") )
return retval
visit_TryExcept = visit_Try
visit_TryFinally = visit_Try
def visit_While(self, node):
if getattr(node, 'orelse', None) is None or len(node.orelse) == 0:
orelse = []
else:
orelse = ["else:\n"] + self.__process_body(node.orelse, " ")
return [
"while %s:\n" % (
self.visit(node.test),
)
] + self.__process_body(node.body, " ") + orelse
def visit_With(self, node):
if getattr(node, 'items',None) is not None:
asvars = ", ".join([self.visit(item) for item in node.items])
else:
if getattr(node, 'optional_vars', None) is None:
asvars = self.visit(node.context_expr)
else:
asvars = "%s as %s" % (self.visit(node.context_expr), self.visit(node.optional_vars),)
if len(node.body) == 1 and isinstance(node.body[0], ast.With):
subwith = self.visit(node.body[0])
return [ "with %s, %s" % (asvars, subwith[0][5:]) ] + subwith[1:]
else:
return [
"with %s:\n" % (asvars,)
] + self.__process_body(node.body, " ")
########################################################################
# simple tests
if __name__ == '__main__':
fmt = ASTFormatter()
import inspect
my_module = inspect.getfile(inspect.currentframe())
sys.out.write(fmt.format(ast.parse(open(my_module, 'rU').read(), my_module, mode='exec'))) | ASTFormatter | /ASTFormatter-0.6.4.tar.gz/ASTFormatter-0.6.4/astformatter/__init__.py | __init__.py |
# ASTFormula – simple and safe formula engine
## Description
This package is a simple and extensible formula engine with Python-like
syntax. **No `eval` usage under the hood** – it's a safe way to work
with formulas and variables from untrusted sources like user input.
## Usage
### Quick start
```python
from astformula import ASTFormula
engine = ASTFormula()
executable = engine.get_calc_expression('a + b * 2')
executable({'a': 2, 'b': 3}) # returns 8
```
### Custom functions
The list of available functions can be supplemented by passing a dict with
function names as keys and callables as values.
```python
from astformula import ASTFormula
def exp3(value):
return pow(value, 3)
engine = ASTFormula(functions={'exp3': exp3})
executable = engine.get_calc_expression('a + exp3(b * 2)')
executable({'a': 2, 'b': 3}) # returns 218
```
### Custom node processors
Any AST node processor can be overridden by passing a `node_type: callback`
dict as the `node_processors` argument. Each callback must take the following arguments:
- engine: ASTFormula - engine instance
- node: ast.AST or core types – current node of type `node_type`
- variables: dict – all variables passed to the executable
In particular, the passed `engine` instance can be used to:
- get operator by name: `engine.get_operator(name)`
- get function by name: `engine.get_function(name)`
- evaluate node: `engine.evaluate(node, variables)`
```python
import ast
from astformula import ASTFormula
def bin_op(engine: 'ASTFormula', node, variables):
# AST node structure: <node.left=left> <node.op=operator> <node.right=right>
result = engine.get_operator(node.op)(
engine.evaluate(node.left, variables),
engine.evaluate(node.right, variables)
)
return engine.evaluate(result)
engine = ASTFormula(node_processors={ast.BinOp: bin_op})
executable = engine.get_calc_expression('a + b')
executable({'a': 2, 'b': 3}) # returns 5
```
### Custom constants
To be implemented
### Custom operators processing
Operator processing can be overridden or extended by passing an
`ast_operator_node: callback` dict as the `operators` argument.
```python
import ast
import operator as op
from astformula import ASTFormula
custom_operators_proc = {ast.Pow: op.pow} # **
engine = ASTFormula(operators=custom_operators_proc)
executable = engine.get_calc_expression('a ** b')
executable({'a': 2, 'b': 3}) # returns 8
```
### Handling exceptions
To simulate a try..except statement, a special function is provided out of
the box - `iferror(statement, fallback)`. The fallback is evaluated only
if the main statement fails.
```python
from astformula import ASTFormula
engine = ASTFormula()
executable = engine.get_calc_expression('iferror(a ** b / 0, None)')
executable({'a': 2, 'b': 3}) # returns None
```
| ASTFormula | /ASTFormula-0.0.3.tar.gz/ASTFormula-0.0.3/README.md | README.md |
import ast
from typing import Union
import astunparse
from astformula.defaults.functions import DEFAULT_FUNCTIONS
from astformula.defaults.operators import DEFAULT_OPERATORS
from astformula.defaults.processors import DEFAULT_PROCESSORS
from astformula.exceptions import UnsupportedASTNodeError, \
UnsupportedOperationError, UnsupportedFunctionError, \
CalculationError, ParsingError
class ASTFormula:
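    """Safe formula engine: parses an expression with ast.parse and evaluates
    the resulting AST using configurable operators, functions and node
    processors (no eval involved)."""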
def __init__(self, functions: dict = None, operators: dict = None,
node_processors: dict = None,
use_default: bool = True, mode: str = 'eval'):
self.mode = mode
self.functions = {**(DEFAULT_FUNCTIONS if use_default else {}),
**(functions or {})}
self.operators = {**(DEFAULT_OPERATORS if use_default else {}),
**(operators or {})}
self.node_processors = {}
for node_types, processor in {
**(DEFAULT_PROCESSORS if use_default else {}),
**(node_processors or {})
}.items():
if isinstance(node_types, (list, tuple)):
for node_type in node_types:
self.node_processors[node_type] = processor
else:
self.node_processors[node_types] = processor
def get_operator(self, ast_op):
if type(ast_op) in self.operators:
return self.operators[type(ast_op)]
raise UnsupportedOperationError(
message=f'Operation {ast_op} is not supported'
)
def get_function(self, name):
if name in self.functions:
return self.functions[name]
raise UnsupportedFunctionError(
f'Function {name} is not implemented')
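    # Parse the statement (newlines collapsed to spaces) into an AST body,
    # wrapping syntax errors in ParsingError.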
def ast_parser(self, statement):
try:
return ast.parse(statement.replace('\n', ' '), mode=self.mode).body
except SyntaxError as e:
raise ParsingError(message=e.msg, node=e.text) from e
@staticmethod
def _unparse(node):
try:
return astunparse.unparse(node)
except Exception: # pylint: disable=W0703
return type(node)
def evaluate(self, node, tree_vars: Union[dict, None] = None):
try:
return self.process_eval(node, tree_vars)
except CalculationError as e:
raise e
except Exception as e:
node_fail = ASTFormula._unparse(node)
raise CalculationError(
message=str(e),
node=node_fail
) from e
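    # Look up the processor registered for this node's exact type; if none is
    # found, fall back to isinstance checks so subclasses are handled too.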
def process_eval(self, node, variables: Union[dict, None] = None):
processor = self.node_processors.get(type(node))
# if no processor defined directly checking for subclasses
# TODO: cover with tests
if not processor:
for typ, proc in self.node_processors.items():
if isinstance(node, typ):
processor = proc
break
if not processor:
raise UnsupportedASTNodeError(
message=f'No processor defined for node {type(node)}',
node=ASTFormula._unparse(node)
)
return processor(self, node, variables)
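    # Parse the statement once and return a closure that evaluates it against
    # a given variables dict.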
def get_calc_expression(self, statement: str) -> callable:
parsed = self.ast_parser(statement)
def calculator(variables: dict):
return self.evaluate(parsed, variables)
return calculator | ASTFormula | /ASTFormula-0.0.3.tar.gz/ASTFormula-0.0.3/astformula/main.py | main.py |
import ast
import operator as op
from decimal import Decimal
from typing import TYPE_CHECKING
from astformula.exceptions import UnsupportedOperationError, \
MissingAttributeError, MissingVariableError
if TYPE_CHECKING:
from astformula.main import ASTFormula # pylint: disable=R0401
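# Sentinel returned when an attribute is missing, so that a stored value of
# None is not mistaken for a lookup failure (see ast_attr).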
CALC_NONE = "CalcNone"
def if_error(engine, variables, condition, alternative_result):
try:
return engine.evaluate(condition, variables)
except Exception: # pylint: disable=W0703
return engine.evaluate(alternative_result, variables)
def not_in(container, item):
return not op.contains(container, item)
def ast_compare(engine: 'ASTFormula', node, variables):
operator = engine.get_operator(node.ops[0])
if operator in [op.contains, not_in]:
result = operator(
engine.evaluate(node.comparators[0], variables),
engine.evaluate(node.left, variables))
else:
result = operator(
engine.evaluate(node.left, variables),
engine.evaluate(node.comparators[0],
variables))
return result
def ast_bool_and(engine: 'ASTFormula', node, variables):
result = None
for value in node.values:
result = engine.evaluate(value, variables)
if not result:
return result
return result
def ast_bool_or(engine: 'ASTFormula', node, variables):
result = None
for value in node.values:
result = engine.evaluate(value, variables)
if result:
return result
return result
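# Collect the keyword arguments of a Call node into a plain dict, evaluating
# each value.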
def get_keywords(engine: 'ASTFormula', node,
variables): # pylint: disable=W0613
dict_all = {}
if node.keywords:
for elem in node.keywords:
dict_all[elem.arg] = engine.evaluate(elem.value)
return dict_all
def num(engine: 'ASTFormula', node, variables): # pylint: disable=W0613
result = node.n
if isinstance(result, float):
result = Decimal(f'{result}')
return result
def constant(engine: 'ASTFormula', node, variables): # pylint: disable=W0613
result = node.value
if isinstance(result, float):
result = Decimal(f'{result}')
return result
def raw_val(engine: 'ASTFormula', node, variables): # pylint: disable=W0613
return node
def float_val(engine: 'ASTFormula', node, variables): # pylint: disable=W0613
return Decimal(f'{node}')
def string(engine: 'ASTFormula', node, variables): # pylint: disable=W0613
return node.s
def raw_list(engine: 'ASTFormula', node, variables):
result = []
for el in node:
if isinstance(el, ast.Starred):
result += [*engine.evaluate(el.value, variables)]
else:
result += [engine.evaluate(el, variables)]
return result
def raw_tuple(engine: 'ASTFormula', node, variables):
result = []
for el in node:
if isinstance(el, ast.Starred):
result += [*engine.evaluate(el.value, variables)]
else:
result += [engine.evaluate(el, variables)]
return tuple(result)
def bin_op(engine: 'ASTFormula', node, variables):
result = engine.get_operator(node.op)(
engine.evaluate(node.left, variables),
engine.evaluate(node.right, variables)
)
return engine.evaluate(result)
def unary_op(engine: 'ASTFormula', node, variables):
return engine.get_operator(node.op)(
engine.evaluate(node.operand, variables))
def bool_op(engine: 'ASTFormula', node, variables):
if isinstance(node.op, ast.And):
result = ast_bool_and(engine, node, variables)
elif isinstance(node.op, ast.Or):
result = ast_bool_or(engine, node, variables)
else:
result = engine.get_operator(node.op)(
engine.evaluate(node.values[0], variables),
engine.evaluate(node.values[1], variables))
return result
def ast_tuple(engine: 'ASTFormula', node, variables):
result = []
for el in node.elts:
if isinstance(el, ast.Starred):
result += [*engine.evaluate(el.value, variables)]
else:
result += [engine.evaluate(el, variables)]
return tuple(result)
def ast_list(engine: 'ASTFormula', node, variables):
result = []
for el in node.elts:
if isinstance(el, ast.Starred):
result += [*engine.evaluate(el.value, variables)]
else:
result += [engine.evaluate(el, variables)]
return result
def ast_dict(engine: 'ASTFormula', node, variables):
return {engine.evaluate(k, variables): engine.evaluate(v, variables) for
k, v in zip(node.keys, node.values)}
def ast_index(engine: 'ASTFormula', node, variables):
lst = engine.evaluate(node.value, variables)
if isinstance(node.slice, ast.Index):
# Handle regular index
idx = engine.evaluate(node.slice.value, variables)
else:
# Handle slices
idx = engine.evaluate(node.slice, variables)
return engine.evaluate(lst[idx])
def ast_call(engine: 'ASTFormula', node, variables):
if isinstance(node.func, ast.Name):
if not getattr(node.func, 'id', None):
raise UnsupportedOperationError(
f'Function {node.func} is not supported')
if node.func.id == 'iferror':
result = if_error(engine, variables, *node.args)
else:
result = engine.get_function(node.func.id)(
*engine.evaluate(node.args, variables),
**get_keywords(engine, node, variables))
return result
func = engine.evaluate(node.func, variables)
return func(
*engine.evaluate(node.args, variables),
**get_keywords(engine, node, variables)
)
def ast_attr(engine: 'ASTFormula', node, variables):
try:
attr_val = engine.evaluate(
node.value, variables).get(node.attr, CALC_NONE)
except AttributeError:
attr_val = getattr(
engine.evaluate(node.value, variables), node.attr, CALC_NONE)
if attr_val is CALC_NONE:
raise MissingAttributeError(f'Missing attribute {node.attr}')
return engine.evaluate(attr_val, variables)
def ast_if(engine: 'ASTFormula', node, variables):
if engine.evaluate(node.test, variables):
return engine.evaluate(node.body, variables)
return engine.evaluate(node.orelse, variables)
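# Evaluates GeneratorExp, ListComp and DictComp nodes; only the first
# generator clause is processed.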
def ast_comp(engine: 'ASTFormula', node, variables):
dict_mode = isinstance(node, ast.DictComp)
result = {} if dict_mode else []
if node.generators:
gen = node.generators[0]
for val in engine.evaluate(gen.iter, variables):
if isinstance(gen.target, ast.Name):
local_vars = {gen.target.id: val}
elif isinstance(gen.target, ast.Tuple):
local_vars = dict(
zip(map(lambda elt: elt.id, gen.target.elts), val))
else:
raise TypeError(
f"Unsupported type {type(gen.target)} for"
f"list comprehensions")
vars_context = {**variables, **local_vars}
if not gen.ifs or all(bool(engine.evaluate(cond, vars_context)) for cond in gen.ifs):
if dict_mode:
result[engine.evaluate(node.key, vars_context)] = engine.evaluate(node.value, vars_context)
else:
result.append(
engine.evaluate(node.elt, vars_context)
)
return result
def ast_name(engine: 'ASTFormula', node, variables):
if node.id not in variables:
raise MissingVariableError(f"Variable {node.id} isn`t set")
return engine.evaluate(variables.get(node.id))
def ast_name_constant(
engine: 'ASTFormula', node, variables): # pylint: disable=W0613
return node.value
def ast_slice(
engine: 'ASTFormula', node, variables): # pylint: disable=W0613
lower = engine.evaluate(node.lower, variables)
upper = engine.evaluate(node.upper, variables)
step = engine.evaluate(node.step, variables)
return slice(lower, upper, step)
DEFAULT_PROCESSORS = {
ast.Num: num,
ast.Constant: constant,
float: float_val,
(int, str, bool, dict, type(None), Decimal): raw_val,
ast.Str: string,
list: raw_list,
tuple: raw_tuple,
ast.BinOp: bin_op,
ast.UnaryOp: unary_op,
ast.BoolOp: bool_op,
ast.Compare: ast_compare,
ast.Tuple: ast_tuple,
ast.List: ast_list,
ast.Dict: ast_dict,
ast.Subscript: ast_index,
ast.Call: ast_call,
ast.Attribute: ast_attr,
ast.IfExp: ast_if,
(ast.GeneratorExp, ast.ListComp, ast.DictComp): ast_comp,
ast.Name: ast_name,
ast.NameConstant: ast_name_constant,
ast.Slice: ast_slice
} | ASTFormula | /ASTFormula-0.0.3.tar.gz/ASTFormula-0.0.3/astformula/defaults/processors.py | processors.py |
# ASTROMER Python library 🔭
ASTROMER is a transformer based model pretrained on millions of light curves. ASTROMER can be finetuned on specific datasets to create useful representations that can improve the performance of novel deep learning models.
❗ This version of ASTROMER only works on single-band light curves.
🔥 [See the official repo here](https://github.com/astromer-science/main-code)
## Install
```
pip install ASTROMER
```
## How to use it
Currently, there are 2 pre-trained models: `macho` and `atlas`.
To load weights use:
```
from ASTROMER.models import SingleBandEncoder
model = SingleBandEncoder()
model = model.from_pretraining('macho')
```
It will automatically download the weights from [this public github repository](https://github.com/astromer-science/weights.git) and load them into the `SingleBandEncoder` instance.
Assume you have a list of variable-length (numpy) light curves.
```
import numpy as np
samples_collection = [ np.array([[5200, 0.3, 0.2],
[5300, 0.5, 0.1],
[5400, 0.2, 0.3]]),
np.array([[4200, 0.3, 0.1],
[4300, 0.6, 0.3]]) ]
```
Light curves are `Lx3` matrices with time, magnitude, and magnitude std.
To encode samples use:
```
attention_vectors = model.encode(samples_collection,
oids_list=['1', '2'],
batch_size=1,
concatenate=True)
```
where
- `samples_collection` is a list of numpy array light curves
- `oids_list` is a list with the light curve ids (needed to concatenate 200-length windows)
- `batch_size` specifies the number of samples per forward pass
- when `concatenate=True`, ASTROMER concatenates every 200-length window belonging to the same object id. The output is then a list of variable-length attention vectors.
## Finetuning or training from scratch
`ASTROMER` can be easily trained by using the `fit` method.
```
from ASTROMER import SingleBandEncoder
model = SingleBandEncoder(num_layers= 2,
d_model = 256,
num_heads = 4,
dff = 128,
base = 1000,
dropout = 0.1,
maxlen = 200)
model.from_pretrained('macho')
```
where,
- `num_layers`: Number of self-attention blocks
- `d_model`: Self-attention block dimension (must be divisible by `num_heads`)
- `num_heads`: Number of heads within the self-attention block
- `dff`: Number of neurons for the fully-connected layer applied after the attention blocks
- `base`: Positional encoder base (see formula)
- `dropout`: Dropout applied to output of the fully-connected layer
- `maxlen`: Maximum length to process in the encoder
Notice you can omit `model.from_pretrained('macho')` if you want to train from scratch.
```
model.fit(train_data,
validation_data,
epochs=2,
patience=20,
lr=1e-3,
project_path='./my_folder',
verbose=0)
```
where,
- `train_data`: Training data already formatted as tf.data
- `validation_data`: Validation data already formatted as tf.data
- `epochs`: Number of epochs for training
- `patience`: Early stopping patience
- `lr`: Learning rate
- `project_path`: Path for saving weights and training logs
- `verbose`: 0 to display information during training, 1 to suppress it
`train_data` and `validation_data` should be loaded using the `load_numpy` or `pretraining_records` functions. Both functions are in the `ASTROMER.preprocessing` module.
For large datasets it is recommended to use TensorFlow Records ([see this tutorial to execute our data pipeline](https://github.com/astromer-science/main-code/blob/main/presentation/notebooks/create_records.ipynb))
## Resources
- [ASTROMER Tutorials](https://www.stellardnn.org/astromer/)
## Contributing to ASTROMER 🤝
If you train your model from scratch, you can share your pre-trained weights by submitting a Pull Request on [the weights repository](https://github.com/astromer-science/weights)
| ASTROMER | /astromer-0.0.6.tar.gz/astromer-0.0.6/README.md | README.md |
# ASTormTrooper
*ASTT* is a simple, small and fast linter that reads dictionaries as configuration.<br>
It accepts functions and lambdas as custom rules. Sometimes, filters don't need much code.
<br><br>

# How to download
```sh
# GIT+PIP
pip install git+https://github.com/ZSendokame/ASTormTrooper.git
# PIP
pip install ASTormtrooper
```
# How to use
You can call ASTT with:<br>
`python -m astt`
### Flags
-c: Change configuration file.<br>
-e: Files to exclude. (Example: `-e "excluded.py,otherfile.py"`)<br>
-a: Path to start the scan from. (Default: .)
# Examples
https://gist.github.com/ZSendokame/816c1d6ea9b78840254e70fd5e90d34a | ASTormtrooper | /ASTormtrooper-3.1.1.tar.gz/ASTormtrooper-3.1.1/README.md | README.md |
__author__='Sebastian Trumbore'
__author_email__='[email protected]'
import math
def sqrt(number, decimal_length=2):
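    """Compute the square root of `number` using the digit-by-digit
    (long-division) method, returning it as a string carried to
    `decimal_length` extra digits after the decimal point.
    Prints an error and returns an empty string for negative or
    non-numeric input."""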
try:
float(number)
if number < 0:
print("\033[0;31m" + ">> Error, invalid number; number must be positive.")
return ""
i = 0
j = 0
broken_number = []
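        # Split the digits of the number into groups (pairs), as required by
        # the long-division square root algorithm.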
if len(str(number)) % 2 == 1 and "." not in str(number):
broken_number.append(str(number)[i])
i += 1
elif len(str(number)) % 2 == 0 and "." in str(number):
broken_number.append(str(number)[i])
i += 1
j = math.ceil(len(str(number)) - i / 2)
for counter in range(int(j)):
if not len(str(number)) < i + 1:
if "." in str(number)[i] + str(number)[i + 1]:
broken_number.append(
str(number)[i] + str(number)[i + 1] + str(number)[i + 2]
)
i += 3
else:
broken_number.append(str(number)[i] + str(number)[i + 1])
i += 2
q = ""
q2 = 0
number = ""
d_placement = len(broken_number)
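        # q accumulates the digits of the result; d_placement marks where the
        # decimal point will be inserted into q.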
for counter in range(len(broken_number) + int(decimal_length)):
if number == 0 and counter > len(broken_number):
pass
else:
if counter >= len(broken_number):
number *= 100
else:
number = round(int(str(number) + broken_number[counter]))
I = 0
while int(str(q2) + str(I)) * int(I) <= int(number):
I += 1
I -= 1
number -= int(str(q2) + str(I)) * int(I)
q2 = int(q2) * 10 + int(I) * 2
q = str(q) + str(I)
answer = str(q[:d_placement:]) + "." + str(q[d_placement::])
return answer
except:
print("\033[0;31m" + ">> Error, invalid number")
return "" | ASqrtC | /ASqrtC-0.1.0-py3-none-any.whl/ASqrtC.py | ASqrtC.py |
# AStared
This package provide the A* algorithm for any type of coordinates.
You can find more information [here](https://en.wikipedia.org/wiki/A*_search_algorithm).
## Installation
```python3.11 -m pip install AStared```
## Usage
You can find examples [here](https://github.com/ThunderTecke/AStared/tree/main/examples).
In global lines, you must define 2 functions to interact with your coordinates. And then pass it to the function `AStar`
These function are :
- Heuristic estimation to the end node
- Neighbours giver, that return all valid neightbours that can be reached with only 1 step | AStared | /AStared-0.1.1.tar.gz/AStared-0.1.1/README.md | README.md |
import re
import unidecode as ud
from collections import Counter
from itertools import permutations
############################## Template Questions/Sentences ##############################
all_template_questions = [
'What is the problem',
'Location of the issue',
'What is the address of the vehicle crossing',
'What is the enquiry',
"What is the customer's name",
"What is the customer's contact number",
'Are there roadwork signs in the problem area',
'Did you see or obtain the registration number or signage of vehicle concerned',
'What is the vehicle crossing application number',
'What is the location',
"What is the customer's physical address",
"What is the customer's email address",
'If person phoning is from the Police, do they have an event number associated with the call',
'What is their permit number (if known)',
'What is the vehicle licence plate number associated with the permit',
'Has customer received their approved vehicle crossing permit',
'What is the issue with the sign',
'When would customer like to pour the concrete (date and time)',
'When will vehicle crossing be ready for inspection by AT (must be before concrete pour)',
'Vehicle registration number',
'What is the vehicle crossing application number (refer to notes above)',
'What type of sign is affected, e.g. street name, parking, bus stop, destination sign, give way',
'Does the customer have an approved vehicle crossing permit',
'How long has the road been like this',
'Is the damage causing a trip hazard, making the area impassable, or caused injury',
'Is the road sealed or unsealed',
'What is the location of the street lights',
'What damage has occurred to the footpath, off-road cycle path or road to road walkway, e.g. cracked, completely broken',
'Do you know who caused damage to the footpath, offload cycle path or road to road walkway',
'Is the light above a pedestrian crossing',
'Is the light a belisha beacon (orange light) at a pedestrian crossing',
'Is the location a rural intersection',
'Location of the sign',
'If graffiti etched into concrete, is it offensive',
'What is the location of the streetlight',
'Does this issue occur at all times, or does it only happen at a certain times of the day',
'Has the problem made the road or footpath impassable, e.g. pole fallen across road',
"Customer's email address",
'Why does customer want increased monitoring at this site',
'Is the problem on the road, footpath or cycleway',
'What size is the pothole',
'Location the car is parked in',
'Is the pothole causing traffic to slow down or swerve',
'Is the customer a resident at this location, regular visitor, workplace',
'What is the road name',
'How often does this occur, e.g. everyday once a week/twice a month',
'Has there been an accident as a result of the issue',
'Is the pothole in the main wheel-track of the road (in direct path of car wheels)',
'How big is the issue, e.g. over 1m, over 1km',
'Do you know who caused the spill',
'Date of parking',
'How is the issue causing an immediate hazard to vehicles or pedestrians',
'When did the problem occur, or is it happening now',
'Is grate partially or fully blocked',
'Location of the cesspit/catchpit drain grate',
'If unknown substance',
'What is blocking the grate, e.g. dirt, leaves',
'What type of phone is the customer using',
'Is the blockage likely to cause flooding soon, e.g. is there heavy rain now or forecast today',
'Why does customer want a refund',
'Where was customer parked',
'What time did customer start parking',
'What time did customer leave the car park',
'What time did AT Park parking session stop',
'If the light is inside an AT bus shelter (a shelter with no advertising panels)',
'If paint, is it still wet, or is it dry',
'Time and date of transaction',
'Location of the fallen branch or tree',
'What is the problem, e.g. item fallen off trailer, colour of rubbish bags, debris from blown-out car tyre etc',
'Is the fallen branch or tree causing an immediate health and safety risk to road or footpath users',
"Customer's contact number",
'What is the AT Park area ID number',
'Has a pre-pour inspection been successfully completed for this crossing',
'If yes, what is the risk/problem',
'Is the issue causing cars to slow down or swerve',
'What is the query, e.g. processing time, technical question',
'When will the vehicle crossing be finished and ready for inspection by AT',
'Does the road require grading or extra metal',
'Are conditions causing a traffic hazard without warning',
'What is the location of the sign',
'How large is the problem',
'How big is the pothole, or what area is uneven or needing metal',
'Is the issue causing traffic to slow down or swerve',
'Location of issue',
'Can you describe the problem as fully as possible',
'What is the address/approximate location of issue',
'If shoulder damage, is the damage crossing the outer white line into the main part of the road',
'What is the reason for this request',
'What is the exact location of the traffic signal (obtain two road names at intersection)',
'If pick-up is required',
'Are all the traffic lights affected or only one',
'What is the date/time that the issue is occurring',
'What is the location of the problem',
'Is the problem happening at a certain time of day',
'Which road is the driver on',
'Which direction is the customer travelling',
'Is the problem causing traffic to queue longer than usual',
'How many light changes does it take to get through',
'Does customer know who has caused the damage',
'What is the customer query',
'Has the equipment been moved to a safe location',
'Where is the work taking place on the road reserve, e.g. footpath, berm, traffic lanes',
'Can the caller give you the name of the company that carried out the works',
'Does caller know what works were being undertaken',
'Are there any identifying company logos or markings on the equipment, e.g. on back of the sign or bottom of the road cone',
'What is the name of the road or address of the piece of road (approximately)',
'Are leaves covering or blocking one or more stormwater grates',
"What is the customer's query",
'Are leaves covering footpath, cycleway and/or road',
'Has customer received a letter drop from Auckland Transport outlining the work details',
'How large is the area covered with leaf fall, e.g. length of 2 houses',
'Is the problem a result of an accident reported by the Police',
'Location of the grass verge or berm',
'When did the damage occur or is it happening now',
'Is the grass berm or verge on a traffic island or median strip',
'Where is the problem, e.g. road surface, footpath, vehicle crossing',
'What are the works being undertaken',
'What is the problem, e.g. trip hazard, loose stone, debris left behind, obstruction',
'Has someone damaged the road as a result of works happening on the road or nearby, e.g. AT contractors, utility company, builders or private contractors',
'Who is doing the activity, e.g. name of construction company',
'Where is the work taking place, e.g. footpath, berm, traffic lanes',
'Does caller have any other relevant information that they could provide, e.g. photos or video',
'Does customer have any other information, e.g. photos, video',
'What is the problem with the grass verge or berm, e.g. berm dug up, vehicle damage, large hole in berm or verge',
'Are there existing broken yellow lines or white / hockey stick parking lines or restrictions on one or both sides of the driveway',
'If vehicles are obstructing visibility, are they illegally parked ie within 1 meter of the driveway or on broken yellow lines',
'What is the issue, e.g. road safety issue, congestion, blocked access, vehicle crossing or entranceway, requesting an extension to an existing Traffic Management Plan',
'What direction is the light facing, e.g. facing traffic coming along Nelson St from motorway',
'What is the query',
'What is the issue, e.g. safety, congestion, blocked access, dangerous work site, vehicle crossing or entrance way blocked',
'For road cones placed on the road to reserve parking (not associated with any road works etc.) i.e. placed out by a local resident or business to reserve parking spaces, does caller have the name of the business or the name and address of the resident, who is responsible',
'What is obstructing visibility at this location, e.g. parked vehicles, road signs, fences or other barriers',
'Is a bulb faulty or not working (single light/bulb out)',
'Location of the dead animal',
'Is the dead animal causing traffic to swerve',
'Is the animal mutilated',
'Type of animal, e.g. dog, cat, possum, horse, cow, sheep etc',
'What colour is the animal',
'What is the location of the street light',
'Is the street light pole supporting a traffic signal',
'Is the street light pole supporting overhead power lines',
'Location of the ditch',
'Location of the problem',
'What is the customer query or issue with the works',
'What is the name of the road or address of the piece of road (approximately) where the works are taking place',
'When was the application lodged',
'If it is a new sub-division, have the lights been turned on before',
'Has customer received a letter drop or notification about the works from anyone',
'Location of the road markings',
'What is the name of the road or roads',
'Is customer querying progress of their pre-permit inspection',
'What is the nature of the customers concern',
'What is the speed limit of the road(s)',
'Is customer aware of any crashes at this location',
'Nature of works, are contractors digging holes, digging trenches, laying pipes etc',
'Customer contact number',
'What is the safety issue at this location',
'Is the speeding at a specific location or is it along the length of the road',
'Customer email address',
'What is wrong with the road markings, e.g. faded, missing, incorrect, need reinstatement after reseal',
'Are there any existing speed calming measures on the road, e.g. speed humps, chicanes',
'If the light is inside an Auckland Transport bus shelter (a shelter with no advertising panels)',
'Is customer concerned that vehicles are exceeding the speed limit or are vehicles going too fast for the road environment, e.g. short road, lots of corners',
'What is the address of the proposed vehicle crossing',
'If markings missing',
'If markings are incorrect',
'What type of road marking is being requested, e.g. give way marking, centre line marking',
'What is the customer name',
'What is their contact phone number and email address',
'List all streets with lights that are out',
'If the concern relates to parked vehicles, does the issue occur during a particular time of day',
'What is the location of the issue',
'Is this a sealed or unsealed road',
'Location of the bus shelter',
'Does the shelter have advertising panels/posters',
'Is the issue causing traffic to swerve',
'Why is sweeping required',
'Is the slip completely blocking the road, or just partially',
'Location of the cesspit, catchpit, drain or sump',
'Is resident trapped on their property or in their vehicle, on the road',
'Full name (as shown on photo ID)',
'SuperGold card client number (9 digit number found on the back of the card)',
'Contact telephone number',
'How would the customer like to be contacted',
'Concession expiry date',
'Are people or properties being impacted by this',
'Police file/reference number',
'Could signs be installed to prevent vehicles parking on the berm',
'Is there an immediate safety risk, e.g. danger to cars running over it, children could fall in drain',
'Location of the safety fence, traffic barrier or guardrail',
'Date of birth',
'Postal address',
'Where in the fence or barrier is the damage',
'Is this a vehicle crossing grate',
'How big is the slip or subsidence, e.g over 1m, over 1km',
'What is the location of the culvert, closet address and landmark',
'How large is the issue e.g. 1 metre long',
'Is the culvert blocked',
'Do you know what caused the damage to the fence or barrier',
'Does the damage present a health and safety risk to pedestrians',
'Location and direction of travel',
'Is the damage obstructing the road or footpath',
'Are there roadworks signs warning of reseal',
'When was the reseal done',
'How much loose chip is there',
'What is the address where the issue occurs (approximately)',
'Not an immediate health and safety risk',
'What is the problem, e.g',
'Is the glass cracked or completely smashed',
'What is the problem/issue',
'Is the glass shattered but still in the frame, or has it fallen out of the frame onto the floor',
'Is the smashed glass causing an immediate health and safety risk',
'Where is the container, bin, skip or temporary structure',
'How long has this been an issue',
'If request for street light to be relocated, why does it need to be moved',
'If congestion, what sort of delays are occurring',
'If no incident, what are the safety concerns',
'Is this caused by roadworks',
'Is there any other relevant information the customer can supply',
'What is the perceived cause of the problem',
'What is the major reason for congestion at this roadworks site',
'How long have you been In the queue',
'Has any incident occurred, e.g. crash, injury, trip',
'Is access blocked completely or is one lane still available',
'How many vehicles are affected',
'What is the problem with the tree or vegetation eg. overhanging or overgrowing encroaching footpath or road',
'Location of vehicle crossing',
'When did this occur or is it happening now',
'What is the problem with the vehicle crossing',
'What is it affecting',
'Was there any prior notification',
'Why are street lights needed at this location',
'What are the incident details',
'What is the address where the streetlight(s) or amenity light(s) are required',
'How many streetlights are being requested',
'What are the names of the roads at this intersection',
'Is the light required at a rural intersection',
'What is the address of the piece of road, approximately',
'Is the intersection controlled by traffic lights',
'If there was notification, what did it say and who sent it',
'What is the reason customer wants their AT Park account disabled',
'How long does it normally take at this location ( if applicable – trigger point is if more than 5 min)',
'If occurred within the road corridor',
'Does customer know who is doing the work',
'Is the activity causing a problem, e.g. obstruction to traffic or pedestrians, trip hazard',
'Machine number, or street address where machine is locate',
'Date and time of transaction',
'Method of payment (cash or card)',
'Area ID number/location of pay by plate machine',
"What is the customer's request",
'How big is the problem',
'Is the kerb or channel on a footpath, traffic island or roundabout',
'What type of activity is taking place and where is it in the road reserve, e.g. digging in berm or footpath',
'Is the culvert damaged',
'If customer is having issues with their account, what are the issues',
'Is a bulb faulty or not working',
'Is this a request for new restrictions or change to existing',
'Vehicle licence plate number',
'Is the problem causing traffic to swerve',
'What is the hazard',
'Has the problem caused an injury or made the area impassable',
'Has anyone been injured',
'Details of any other vehicles involved',
'What is the approximate address of the location concerned',
'What is the problem with the coverplate (door to wiring at the bottom of the street light) or street light shield',
'If occurred at an intersection',
'What pedestrian facilities are at this location, if any',
'Do you know who caused damage to the kerb or channel, e.g building site nearby, utility company',
'What is the location of the flooding',
'How much water is there',
'Does the road have kerb and channel or is there a ditch beside road',
'Is the flooding causing traffic to swerve',
'Is flooding causing pedestrians to walk on road',
'Is the issue causing an immediate health and safety issue',
'Date of incident',
'Is the flooding blocking accessibility to properties',
'Is flooding emergency now or likely to worsen with weather conditions, e.g. heavy rain forecast today',
'Date and time of the transaction',
'How did the damage occur',
'Is road completely blocked or partially blocked',
'Is the machine beeping',
"What is the caller's registration number",
"Customer's name",
"If paid by card, what are the last 4 digits of the card used (do not ask for full card number, AT will never ask for customer's full credit or debit card number)",
'How long has the road been affected',
'What is the name of the school',
'What direction is the light facing, eg. facing traffic coming along Nelson St from motorway',
'What type of sign is affected',
"What is the value of the caller's transaction",
'If customer can read it',
'What is the location of the problem, e.g. street address, road names',
'Is the problem ponding',
'If on state highway or motorway',
'What outcome is the customer expecting',
'What is the fault with the machine',
'What is the name of the street and suburb or approximate location of the road works',
'How long has the noise been going on',
'Did the customer receive a letter advising of the work',
'Does customer know the name of contractor doing the job',
'Which road is the driver on when visibility is restricted',
'What is obstructing visibility at this location, e.g. parked vehicles, traffic signs, fences',
'Are there any existing parking restrictions in the turning circle, e.g. signage or broken yellow lines',
'Is there a particular time of day this issue occurs',
'Does the problem occur at a particular time',
'Do all vehicles have difficulty turning or just larger vehicles, e.g. refuse trucks',
"Customer's email for contact",
'What is the issue, e.g. vibrating, noise, time of works etc.',
'Has customer witnessed an occasion when an emergency vehicle was unable to pass through the road',
'Method of payment',
"What is the nature of the customer's concern for emergency vehicles ability to access a road or other location",
'Vehicle licence plate number and any other plate numbers that are used by customer',
'Time of transaction',
'Machine number',
'What is the issue',
'Indicate the type of fault',
'If paid by card, what are the last 4 digits of the card used (do not ask for the full card number, AT does not ask for full credit or debit card number)',
'What is the customers contact number',
'Location of the street furniture or fittings',
'When did the works start',
'What is the problem with the furniture or fittings, e.g. seat broken',
'If stolen motorbike',
'Is the damage causing an immediate health and safety risk, e.g. seat will collapse if sat on',
'What is the existing traffic control at this intersection, e.g. roundabout, stop, give way, no control',
'What is the safety concern at this intersection',
'What type of vehicles does the customer want restricted',
'What is the reason for requesting a restriction of vehicle access',
'Fault description',
'If the wrong vehicle licence plate number was entered, what number plate was used, and what is the correct plate number',
'Which car park building',
'What is the extent of problem, eg entire road or just a section',
'What is the marking or sign that needs to be removed',
'Why do you believe it needs to be removed',
'What is the vehicle license plate number associated with the permit',
'What is the current speed limit',
'What is the exact location of the pedestrian buzzer/button on the traffic signal (obtain two road names at intersection)',
'What speed would customer suggest the limit be changed to',
'What is the nature of the problem, e.g. speed humps/judder bars are too high or too close together',
'What is the address and suburb',
'What information do they require',
'What assistance is required, e.g. temporary signage requested, needed urgently because of accident, road works',
'If in AT car park ',
'What is the section of roadside the customer is requesting a footpath',
'Is there an existing footpath on the other side of the road',
'What damage has occurred to the footpath',
'Is information required regarding current operation, general, or for a future planned activity',
'Car park building name and location',
'Does the issue occur at a particular time of day',
'Day and time of transaction (if possible)',
'Date and time of malfunction',
'Why is the customer requesting a refund',
'How long have they been on, e.g. today, the past few days',
'Customer contact details, including postal or physical address',
'What is the dollar value of transaction',
'How many street lights are always on',
'What seems to be the cause of the congestion',
'Form of payment, i.e. credit card or EFTPOS',
'Are there any existing plantings',
'Would this planting restrict visibility for motorists or pedestrians',
'What is the vehicle crossing application or reference number',
'What was the time and date of the originally booked inspection',
'What inspection was originally booked, i.e. pre-pour or final',
'Does customer have any idea when the vehicle crossing will be ready for inspection by AT',
'What is the address where the issue occurs, approximately',
'How did the customer pay, e.g. cash, credit card, voucher, parking debit card',
'Where is the location, e.g. Landmark, street address, intersection of the road names',
'If removal of plants or vegetation is being requested, why do they need to be removed',
'Location of the retaining wall',
'What is the CAR number',
'What is the address/location of the car park building or car park',
'Customer’s email address (having an email address is mandatory for AT to process the refund as quickly as possible, and is our preferred method of contact)',
'If query is about a CCTV attached to street light ',
'What type of tree planting is requested, e.g. pohutukawa tree, hedge',
'What is the address of the accessway',
'What is the problem with the bus shelter',
'What is the reason for requesting a barrier',
'What is the address of the issue',
'Why does the customer believe that the structure is illegal',
'What is causing the car to scrape',
'What is the structure that has been built on AT property, e.g. shed, fence, election sign',
'Location of the vehicle crossing',
'If customer regularly drives different vehicle(s), also provide the vehicle licence plate number(s), in case customer inadvertently entered the wrong vehicle details. ',
'What direction is the light facing',
'Does the bus shelter have major structural damage, e.g. damage from a motor vehicle accident',
'Is the structural damage causing an immediate health and safety risk',
'Description of incident',
'Location of works – e.g. footpath, grass berm, sealed road way, etc',
'How long has the structure been there',
'Do you know who caused damage to the retaining wall',
'If stolen bicycle or scooter – make and model ',
'Customers name',
'Is the vehicle crossing being used for residential or commercial purposes',
'What is the issue with the vehicle crossing',
'What is the problem with the retaining wall, e.g. falling over, blocking footpath/road',
'What are the names of the roads at the intersection',
'Customers contact phone number',
'Does the customer want the information posted or emailed',
'If stolen vehicle – year, make and model ',
'What brochure, document or form does the customer require',
'How many copies does the customer require',
'Vehicle details',
'Preferred Method of Contact',
'Where in the car park would they have lost their item',
'What day and time was the item lost',
'A description of what has been lost',
'What is the exact location where the issue is occurring at this intersection',
'Are vehicles parking legally or illegally (within 6 metres of an intersection)',
'Is the volume of the water in the vehicle crossing increasing',
'What is the customer contact number',
'What is your concern with people parking at this location (if a visibility issue, log as Visibility on road or driveway)',
'Location of the bridge',
'What is the problem with the bridge',
'Description of incident, e.g. road rage, theft, crash or accident etc.',
'If at an intersection',
'Has a vehicle hit the bridge',
'Is there a high likelihood it could be dangerous',
'Location of vehicle',
'Has the problem caused injury or made the bridge impassable',
'What is the customers name',
'Date and time incident occurred',
'What happened',
'What is the pedestrian safety issue',
'What is the address',
'What is the road name or location of where the incident took place',
'What is the problem with where or how the vehicle is parked',
'Is vehicle blocking an entrance',
'Has the problem made the road impassable, e.g. sign fallen across road',
'Please explain in detail what you believe the issue is',
'If query is about a CCTV attached to street light',
'What is the customer contact phone number or email address',
'Has someone damaged the road as a result of works happening on the road or nearby',
'Physical description of contractor',
'Has the problem made the road or footpath impassable',
'If paid by card, ask caller for the last 4 digits of their card number (do not ask for the full card number) ',
'If observed, a vehicle description or name on vehicle',
'Is this problem causing a safety issue to users',
'If in AT car park',
'What is the problem with the grass verge or berm',
'Is the call from the Police',
'Does vehicle pose a safety risk for other road users',
'Where in the building is the problem, e.g. near entrance, car park space number',
'Additional information',
'What is the problem,',
'What type of sign is affected,',
'If the light is inside an AT bus shelter',
'What is the name of the school the request is being made for',
'What is the issue at this location',
'Location of the speed hump/judder bar',
'What is the extent of problem',
'If paid by card, what are the last 4 digits of the card used',
'Was payment made using the AT Park app or ATM machine',
'Customers email address',
'What is the exact location of the traffic signal',
'What is the nature of the problem, e.g. damaged, trip hazard',
'Is this a request for improved signage or is there currently no signage in place',
'Where on the road is the no exit sign required, e.g. start of road or mid-block intersection',
'What is the name of the street',
'Which parking building or car park is the customer in',
'What is the problem, e.g. barrier arm not rising',
'Where is the work taking place on the road reserve',
'Would customer like to purchase a pre-paid parking voucher',
'Customer email or postal address',
'What is the facility / business that signage is required for',
'What is the requested location for this new signage, e.g. street address or intersection',
'What is the reason this sign is wanted',
'Are there any other lights in the area not working e.g, shops, houses',
'Which car park building is the customer in',
"Customer's email for contact ",
'What is the issue the customer is concerned about',
'Has customer lost their voucher',
'If request is for renewal or replacement of the existing footpath, why does the caller believe this is necessary',
'When will vehicle crossing be ready for inspection by AT',
'Where is the problem',
'What intersection is the sign required for',
'Is there an existing street name blade for this street',
'If request is to relocate a street name sign, why does it need to be relocated',
'If request is for a new street name blade, why is it required',
'List all streets that are out',
'What is the issue with the traffic island, pedestrian refuge or chicane, e.g. damaged, trip hazard',
'What is missing',
'Has the street recently been resurfaced',
'What is obstructing visibility at this location',
'What is wrong with the road markings',
'Is this a one-way or no-exit street',
'What is the approximate location the customer feels the sign should be placed',
'Customers contact number',
'What is the address of the location that requires a fence',
'Why is a fence required at this location',
'Could signs be installed to prevent vehicles parking for sale',
'What is the safety issue',
'Is there a particular time of day the issue occurs',
'If request is for renewal or replacement of the existing stormwater asset, why does the caller believe this is necessary',
"Customer's car registration number",
'Any other queries',
'Can you identify the contractor',
'What is the address of the pedestrian crossing (approximately)',
'What is the name of the road',
'Has this issue been raised with the NZ Police',
'Has the problem made the sign unreadable',
'Location of vehicle and/or incident within building, e.g. level, car park number, near exit',
'Car details',
'Where is the work taking place',
'What is the NECAR number',
'Are there any relevant signage and/or road markings at this location',
'What is the intersection number and names of the roads at this intersection',
'Are there any other lights in the area not working, e.g. shops, houses',
'If the light is inside an Auckland Transport bus shelter',
'How long does it normally take at this location',
'What is the problem with the tree or vegetation',
'What is the information for',
'What is the location of the problem, e.g. street address or intersection',
'What is the problem, ',
'Date and time ',
'List all streets with lights that are out ',
'Description of incident, e.g. road rage, theft, crash or accident etc. ',
'What is the problem, e.g. spilt liquid, graffiti, litter',
'If stolen vehicle – year, make and model',
'What is the incident, e.g. vehicle damage, theft, breakdown',
'What caused the incident',
'Was the damage caused by parking building equipment or maintenance fault',
'Location of works – e.g. footpath, grass berm, sealed road way, etc ',
'If paid by coin',
'Which approach to the intersection is a camera required',
'What is the reason for a camera ie what did the caller observe at the intersection to suggest a new camera is required, did caller observe a crash or ongoing red light running',
'What is the location of the street furniture or fittings',
'What type of furniture or fitting is missing',
'Is the missing item causing an immediate health and safety risk',
'What is the facility that signage is required for',
'Where is the signage required',
'Was there a sign there previously',
'What damage has occurred to the footpath, off-road cycle path or road to road walkway',
'What is the reason for requesting the seat',
'At what location is the customer intending to install the convex mirror',
'Why is the convex mirror needed',
'Does the customer have permission from the landowner',
'What asset has been damaged',
'What damage has been done',
'Who is responsible for the damage',
'What is the location or name of the development',
'What is the Resource Consent or Land Use Consent number',
'If damage to car, ask customer to detail damage to car',
"Customer's name ",
'Is this a private mirror',
'Who installed the mirror, e.g. was it the property owner, Auckland Transport or a legacy Council',
'What is the address of the mirror',
'What is the location of the mirror in relation to the closet address, e.g. opposite side of road from given address',
'If paid by card, ask caller for the last 4 digits of their card number (do not ask for the full card number)',
'Do you know who caused damage to the footpath',
'What is the address the customer would like the seat placed (approximately)',
'Advise on the webform if you or the customer has spoken to anyone else associated with this project, i.e. Stakeholder Manager or Police. Preferred Method of Contact',
'What is the problem, e.g. pothole, broken door',
'If theft, ask customer to provide list of what is missing and approximate value of each item',
'What is the location you would like the traffic count data gathered',
'What data does the customer request be gathered',
'Who has previously maintained the mirror',
'How long has this problem existed',
'What kind of sign is required, e.g. ducks crossing, horses in area',
'What is the vehicle crossing application number ',
'Is an insurance company involved',
"Please ensure that you have the entered the customer's contact details",
'If damaged, do you know who caused the damage',
'List all streets that are out.',
'How long has this been a problem',
'Is the damage causing a trip hazard',
'What type of road marking is being requested, e.g. give way marking, center line marking',
'What is the customers query',
'Can you describe the problem as fully as possible, i.e. is the street light, pole or fittings or exterior damaged',
"AT machine/meter number, this is a four-digit number displayed in the top left-hand corner of the machine's screen",
'Postal Address',
'What is their permit number',
'Customer query',
'Advise on the webform if you or the customer has spoken to anyone else associated with this project, i.e. Stakeholder Manager.',
'Urgent cleaning',
'What time of day does the issue occur',
'What is the wait time or length of queue at the intersection, approximately',
'What type of road marking is being requested',
'Is there an immediate safety risk',
"AT machine/meter number, this is a four-digit number displayed in the top left-hand corner of the machine's screen ",
'Additional Information',
'If request is for renewal or replacement of an existing infrastructure asset, why does the caller believe this is necessary',
"What is the customer's relationship to the development or project, i.e. land owner or developer",
'Was the website accessed on a mobile device (phone/tablet) or computer (desktop/laptop)',
'What is the existing traffic control at the intersection, e.g. give way, roundabout, stop, no control',
'What is the building consent number',
'If yes, what is the vehicle crossing application number',
'What is the problem ',
'Is the request for maintenance or replacement of an existing sign',
'Is the request for a new sign',
'Ask caller for the Police event number associated with the call and record the number in your notes.',
'What type of sign is affected, ',
'Is the blockage likely to cause flooding soon',
'List all streets with lights that are out ',
'Does customer have photos',
'What direction is the light facing, e.g. facing traffic coming along Nelson Street from motorway',
'If call is about a damaged CCTV camera, which floor is the camera located on and where',
'How large is the issue',
'If customer regularly drives different vehicle(s), also provide the vehicle licence plate number(s), in case customer inadvertently entered the wrong vehicle details',
'How big is the issue',
'Customer name',
'What is the query regarding',
'If customer regularly drives different vehicle(s), also provide the vehicle licence plate number(s), in case customer inadvertently entered the wrong vehicle details. N/A ',
'What is the section heading, e.g. Parking in Auckland',
'What browser was customer using, e.g. Internet Explorer, Chrome, Firefox, Safari',
'What is the issue/suggestion',
'Did customer get any error messages when trying to perform the task',
'How frequently does the congestion / delay occur',
'If stolen bicycle or scooter – make and model ',
'How big is the slip or subsidence',
'What is the address of the piece of road concerned, approximately',
'What does the customer think is required at this location',
'What is the location of the culvert',
'Vehicle license plate number',
'What type of sign is required',
'Why is the sign required',
'Where is the signage required in the car park building or car park',
'What is the issue,',
'Ask caller for the Police event number associated with the call and record the number in your notes',
'Location of works',
'What is the issue, ',
'Advise on the webform if you or the customer has spoken to anyone else associated with this project, i.e. Stakeholder Manager',
'Who is doing the activity',
'Description of incident, e.g. road rage, theft, crash or accident etc',
'What is the page link (URL) or name of online service, e.g. Journey Planner',
'If query is about a CCTV attached to street light N/A ',
'What is the problem with the cover plate (door to wiring at the bottom of the street light) or street light shield',
'On which approach(es) to the intersection does the congestion / delay occur',
'If stolen vehicle – year, make and model ',
'If for a re-inspection, why was it originally failed',
'When would customer like to pour the concrete',
'Does customer know what type of work was being carried out',
"Customer's contact number ",
'What is the name of the road or address of the piece of road',
'Type of animal',
'How big is the pothole',
'Check the query is not covered under any other maintenance process or new request. ',
'Which road safety program is the customer querying',
'Nature of works',
'Date and time',
'What is the customer',
'In 2022, vouchers may be offered through the AT Park app, to help the team prepare for the transition, ask customer',
'How often does this occur',
'If paid by card, ask caller for the last 4 digits of their card number (do not ask for the full card number) N/A ',
'If customer is requesting multiple receipts, ensure all dates and relevant vehicle licence plate numbers are listed (only one case is required)',
'If paid by card, ask caller for the last 4 digits of their card number (do not ask for the full card number) n/a ',
'Date, time and location of incident',
'If slippery',
'If trip hazard',
'Does customer have any other information',
'What is obstructing visibility at this location,',
'Is the pothole in the main wheel-track of the road',
'Where is the work taking place on the road reserve, ',
'Location of works – e.g. footpath, grass berm, sealed road way, etc ',
'Where is the clock',
'What is the problem with the clock',
'If query is about a CCTV attached to street light ',
'How large is the area covered with leaf fall',
'Level 2 Complaint',
'What is the current speed limit on the road',
'Is the sign missing or damaged',
'Location of the issue ',
'Length of abandonment',
'Customer’s email address',
'Name or area of consultation',
'List all streets that are out. ',
'What is the address of the crane',
'What is their contact phone number',
'What type of sign is affected, e.g. street name',
'If person phoning is from the Police',
'Why are the markings required',
'What is the address where the cross or sign is required, approximately',
'Where on the road reserve would the cross or sign be placed',
'What is the address of the scaffolding and/or gantry',
'Are leaves covering or blocking one or more storm water grates',
'Check the query is not covered under any other maintenance process or new request. ',
'What direction is the light facing, ',
'Location of works – e.g. footpath, grass berm, sealed road way, etc Road ',
'Where in the car park building',
'What is the location of the CCTV camera',
'What is the issue with the camera',
'What is the issue with the tactile indicator, e.g. damage, trip hazard, slippery',
'What is blocking the grate',
'What direction is the light facing,',
'If the request is for renewal or replacement of an existing infrastructure asset, why does the caller believe this is necessary',
'Any other relevant details. Preferred Method of Contact',
'If paid by card, what are the last 4 digits of the card used ',
'Location of works – Road ',
'Check this is an AT car park. ',
'How large is the issue e.g. 1 meter long',
'What is the address where angle parking is required, approximately',
'What is the current parking at this location',
'If customer regularly drives different vehicle(s), also provide the vehicle licence plate number(s), in case customer inadvertently entered the wrong vehicle details. ',
'If customer is requesting a free hi-vis vest ',
'What type of activity is taking place and where is it in the road reserve',
'If customer regularly drives different vehicle(s), also provide the vehicle licence plate number(s), in case customer inadvertently entered the wrong vehicle details. NA ',
'Are there any identifying company logos or markings on the equipment',
'What is the query,',
"Customer's name ",
'Any other relevant details',
"What is the customer's relationship to the development or project, i.e. land owner, neighbour or developer",
'What asset or facility is the customer querying',
'How is the caller impacted',
'Location of problem',
'Vehicle details (if possible)',
'What is or was the problem',
'What is the problem with the retaining wall',
'Which school program is the customer querying',
'What damage has occurred to the footpath, ',
'If in AT car park N/A ',
'What is the name of the road or address of the piece of road ',
'Where in the building is the lift',
'What is the lift number (if it is visible)',
'Advise on the webform if you or the customer has spoken to anyone else associated with this project',
'Check the query is not covered under any other maintenance process or new request',
'Location of works – e.g. footpath, grass berm, sealed road way, etc Road ',
'What is the problem with the tree',
'What type of road marking is being requested,',
'What damage has occurred to the footpath,',
'Is the problem on the road',
'If stolen bicycle or scooter – make and model',
'Where is the rubbish',
'What type of litter is it',
'Is it a single bit of rubbish or does the whole street in general need a clean up',
'How much litter is there',
'In 2022, vouchers may be offered through the AT Park app, to help the team prepare for the transition, ask customer (still a work in progress)',
'If customer is requesting a free hi-vis vest ',
'How long does it normally take at this location',
]
############################## Email Variables ##############################
at_email_domain = ["at.govt.nz", "at.co.nz", "govt.nz", "athop.co.nz", "aucklandtransport.govt.nz",
"aucklandcouncil.govt.nz", "snapsendsolve.com"]
staff_template_text = ["please leave the subject line of this email intact",
"started working on your case",
"please contact the auckland transport",
"20 viaduct harbour avenue",
"thanks for getting in touch with auckland rransport",
"We’ve started working on your case"
]
pronouns = ["i", "iam", "am", "me", "my", "im"]
email_pat = r'[\w.+-]+@[a-zA-Z0-9\-]+\.[\w.-]+'
email_text_to_be_removed = ["To:", "Subject:", "Sent:", "Received:", "Date:", "Re:", "Fw:", "RE:", "FW:", "Fwd:", "<", ">"]
############################## Web Variables ##############################
pat1_rephrase = {"Route Number :": ". The route number is ",
"Station name :": ". In station ",
"Date and time:": ". The date time is ",
"What does the problem relate to?:": ". The problem relates to ",
"Have you reported this issue before?:": "The issue has been reported before. ",
"Have you requested this before?:": ". I have requested this before. ",
"Do you know who caused this? :": ". This cause by ",
"Issue description :": ". ",
"What is the problem?:": ". ",
"What would you like to request? :": ". The request is ",
"What would you like to ask? :": ". ",
"What would you like to tell us? :": ". ",
"Does your request relate to a specific location? :": ". The location is ",
"Information requested :": ". The information request is ",
"What does your request relate to?:": ". The request relates to ",
"Let us know any other information OR if you would like to request inspection for more than one vehicle crossing located in the same area then please provide details here. :": " ",
"Let us know of any additional information :": ". ",
"Additional location information :": ". Additional location information is ",
"Let us know the details of your feedback or suggestions :": " ",
"Bus stop number :": ". The bus stop number is ",
"Suggested bus stop name :": ". The suggested bus stop name is ",
"Bus stop name :": ". The bus stop name ",
"Route:": ". In route ",
"Description:": ". ",
"Desired Outcome:": ". The desired outcome is ",
"Lost property description :": ". ",
"Park & Ride name :": ". At ",
"Direction of travel:": ". Travel direction"
}
personal_other_fields = [
"Type of inspection required:",
"Concrete pour date :",
"Vehicle crossing application reference number :",
"First name :",
"Last name :",
"Your preferred date:",
"Contact phone number:",
"Customer contact details:",
"Date of birth:",
"client number:",
"Business phone:",
"Name on SuperGold card :",
"Level 2 Complaint:",
"Preferred Method of Contact:"
]
AT_template_sentences = ["Ask for route or schedule information",
"Ask for accessible travel information",
"Ask for accessible travel information"
"Ask for on-board bus information",
"FINAL INSPECTION REQUEST",
"FINAL INSPECTION BOOKING",
"PREPOUR INSPECTION BOOKING",
"Official information LGOIMA",
"Update AT HOP SuperGold concession details",
"Request maintenance or report damage to off street carpark",
"- nearest address or landmark",
"nearest address or landmark"
]
############################## Phone Variables ##############################
questions_to_remove_with_answers = [
"What is the customer name",
"What is the address of the proposed vehicle crossing",
"What is the customer's name",
"What is the customer's contact number",
"What is the address of the vehicle crossing",
"What is the customer's physical address",
"What is the customer's email address",
"What is the AT Park area ID number",
"What is the vehicle licence plate number associated with the permit",
"What is their permit number",
"What type of phone is the customer using",
"When will vehicle crossing be ready for inspection by AT",
"When will the vehicle crossing be finished and ready for inspection by AT",
"When would customer like to pour the concrete",
"Date of parking",
"Time and date of transaction",
"Vehicle registration number",
"If person phoning is from the Police, do they have an event number associated with the call",
"Has customer received their approved vehicle crossing permit",
"Does the customer have an approved vehicle crossing permit",
]
questions_to_rephrased = {
"What is the issue with the sign": ". The sign is ",
"What time did AT Park parking session stop": ". The park session stoped at ",
"What is the query": ". The query is ",
"What is the vehicle crossing application number": ". vehicle crossing application number is ",
"What is the customer's request": ". ",
"What type of sign is affected": ". The sign affected is ",
"What damage has occurred to the footpath, off-road cycle path or road to road walkway": ". The damage is ",
"What is the problem": ". The problem is ",
"What is the enquiry": ". The enquiry is ",
"What is the road name": ". The road name is ",
"What is the address/approximate location of issue": ". The location is ",
"What is the location": ". The location is ",
"What is the location - nearest street address or landmark": ". The location is ",
"What is blocking the grate": ". The grate is blocked by ",
"What size is the pothole": ". The size of the pothole is ",
"What time did customer start parking": ". The customer started parking ",
"Why does customer want increased monitoring at this site": ". Reason for increased monitoring ",
"Why does customer want a refund": ". Customer wants refund because ",
"What is the location of the streetlight": ". The location of the streetlight ",
"Where was customer parked": ". The customer parked at ",
"How big is the issue": ". The issue is ",
"How often does this occur": ". This occurs ",
"How long has the road been like this": ". The road has been like this for ",
"How large is the problem": ". The problem is ",
"Does this issue occur at all times, or does it only happen at a certain times of the day": ". The issue occurs ",
"Do you know who caused damage to the footpath, offload cycle path or road to road walkway": ". The damage caused by ",
"Can you describe the problem as fully as possible": ". ",
"Location": ". Location is ",
"Location of the sign": ". Location of the sign is ",
"Is the damage causing a trip hazard, making the area impassable, or caused injury": ". The damage cause ",
"Is the road sealed or unsealed": ". The road is ",
"Is the light above a pedestrian crossing": ". The light above a pedestrian crossing",
"Is the light a belisha beacon (orange light) at a pedestrian crossing": ". The light is a belisha beacon at pedestrian crossing ",
"Is the location a rural intersection": ". The location is a rural intersection ",
"Is the problem on the road, footpath or cycleway": ". The problem on ",
"If graffiti etched into concrete, is it offensive": ". ",
"Has the problem made the road or footpath impassable": ". The problem caused "
}
yes_or_no_rephrase = {"Are there": ". Yes there are",
"Are there any": ". Yes there are",
"Are": ". Yes",
"Is the": ". Yes the",
"Is": ". Yes the",
"Has": ". Yes",
"Does": ". Yes",
"Has anyone": ". Yes someone"
}
template_sentences_to_be_removed = [
"Pre-pour inspection booking process - all questions need to be answered.",
"Preferred Method of Contact: E-mail",
"Preferred Method of Contact: Mobile Phone",
"If no or customer unsure, record this in the job notes.",
"If customer is unable to locate their application number on their form, refer to legacy council table to assist them.",
"(quite often the name or company logo is on the equipment i.e. cones, vehicles and signs)",
"Has any action been taken (e.g. emergency services, doctors visit etc)",
"If no, what is the likelihood that someone would be injured and if so, how serious would that be",
"Why and how is it dangerous Yes anyone been injured",
"( if applicable - trigger point is if more than 5 min)",
"(this is so that AT can compare with what the delay times are)",
"(10s, 100s, 1000s)",
"n/a",
"What is the problem with the grass verge or berm, Tree near the lamb post the logs are rotten and sharp pointing out and in the edge between the footpath and the berm, the tree need trimming along the edge of the berm."
]
############################## Utility functions ##############################
def regex_escape(text):
text = text.replace("(", "\(")
text = text.replace(")", "\)")
text = text.replace("+", "\+")
text = text.replace("[", "\[")
text = text.replace("]", "\]")
text = text.replace("?", "\?")
text = text.replace("*", "\*")
text = text.replace("$", "\$")
return text
def regex_escape_reverse(text):
text = text.replace("\(", "(")
text = text.replace("\)", ")")
text = text.replace("\+", "+")
text = text.replace("\[", "[")
text = text.replace("\]", "]")
text = text.replace("\?", "?")
text = text.replace("\*", "*")
text = text.replace("\$", "$")
return text
############################## Email Channel Processing Function ##############################
def merge_writer_header(s1, s2):
i = 0
while not s2.startswith(s1[i:]):
i += 1
return s1[:i] + s2
def check_owner(body, from_email=None):
if from_email:
email = re.findall(email_pat, from_email)
if email:
email = email[0]
if email[email.index('@') + 1 : ] in at_email_domain:
return "staff"
else:
return "customer"
if any(substr.lower() in body.lower() for substr in staff_template_text):
return "staff"
elif any(substr in body.split() for substr in pronouns):
return "customer"
else:
return "unknown"
def get_subject_body(writer, email, header=None):
sub_body = email
if header:
sub_body = sub_body.replace(merge_writer_header(writer,header), '')
for text in email_text_to_be_removed:
sub_body = sub_body.replace(text, '')
email_addrs = re.findall(email_pat, sub_body)
if email_addrs:
for addr in email_addrs:
sub_body = sub_body.replace(addr, '')
sub_body = sub_body.strip()
return sub_body
def parse_email(writer, email):
from_match = None
to_match = None
header_pat = ["From:(.*?)Sent:(.*?)To:(.*?)Subject:",
"From:(.*?)Received:(.*?)To:(.*?)Subject:",
"From:(.*?)Date:(.*?)To:(.*?)Subject:",
"From:(.*?)To:(.*?)Sent:(.*?)Subject:",
"From:(.*?)To:(.*?)Date:(.*?)Subject:",
"From:(.*?)Date:(.*?)Subject:(.*?)To:",
"From:(.*?)Subject:(.*?)Date:(.*?)To:",
"From:(.*?)To:(.*?)Subject:",
"From:(.*?)To:(.*?)Date:",
"From:(.*?)Received:(.*?)To:",
"From:(.*?)Sent:(.*?)To:",
"From:(.*?)To:"
]
header_pat_no_to = [
"From:(.*?)Date:(.*?)Subject Line:",
"From:(.*?)Date:(.*?)Subject:",
"From:(.*?)Received:(.*?)Subject:",
"From:(.*?)Sent:(.*?)Subject:",
"From:(.*?)Sent:",
"From:(.*?)Date:"
]
match = []
if "To:" in email:
for pat_ in header_pat:
if re.findall(f'({pat_})', email):
match = re.findall(f'({pat_})', email)
from_match = match[0][1]
if not pat_.split("(.*?)")[-1] == "To:":
to_match = re.findall("(\s?To:.*?(?:Sent:|Date:|Subject:))", match[0][0])[0]
break
else:
for pat_ in header_pat_no_to:
if re.findall(f'({pat_})', email):
match = re.findall(f'({pat_})', email)
from_match = match[0][1]
if match:
sub_body = get_subject_body(writer, email, header=match[0][0])
owner = check_owner(sub_body, from_email=match[0][1])
else:
sub_body = get_subject_body(writer, email)
owner = check_owner(sub_body)
return from_match, to_match, sub_body, owner
def get_email_attributes(writer, email):
from_match = None
to_match = None
sub_body = None
if "From:" in writer:
from_match, to_match, sub_body, owner = parse_email(writer, email)
elif "wrote:" in writer:
sub_body = email.replace(writer, '')
if re.findall(email_pat, writer):
from_match = re.findall(email_pat, writer)[0]
owner = check_owner(sub_body, from_email=from_match)
else:
owner = check_owner(sub_body)
else:
print("Came Else of get_email_attributes, please check")
return from_match, to_match, sub_body, owner
def parse_case(split_email):
from_ = []
to_ = []
sub_body_ = []
owner_ = []
ord_ = 0
i = 0
while i < len(split_email):
if ("From:" in split_email[i]) or ("wrote:" in split_email[i]):
order = ord_
if i+1 < len(split_email):
from_match, to_match, sub_body, owner = get_email_attributes(split_email[i], split_email[i]+split_email[i+1])
from_.append(from_match)
to_.append(to_match)
sub_body_.append(sub_body)
owner_.append(owner)
i += 2
else:
order = ord_
if split_email[i] != ' ' or split_email[i] != ' ':
from_match = None
to_match = None
sub_body = split_email[i]
owner = check_owner(sub_body)
from_.append(from_match)
to_.append(to_match)
sub_body_.append(sub_body)
owner_.append(owner)
i += 1
ord_ += 1
return from_, to_, sub_body_, owner_
def break_multi_wrote(text):
x = re.findall('wrote:', text)
wrote_pat = [
"On\s((?:\S+(?:\s+|$)){,17})wrote:",
"<*[\w.+-]+@[a-zA-Z0-9\-]+\.[\w.-]+>* wrote:"
]
result = []
for m in re.finditer('wrote:', text):
tmp = text[:m.end()]
y = re.findall(f'({"|".join(wrote_pat)})', tmp)
if y:
result.append(y[0][0])
else:
result.append("wrote:")
text = text.replace(tmp, '')
return result
### Main Function
def process_email_channel(s):
if "From:" in s or "wrote:" in s:
s = str(s)
s = s.replace("'", "")
if "(at) wrote:" in s or "(AT) wrote:" in s:
s = s.replace("(at) wrote:", "[email protected] wrote:")
s = s.replace("(AT) wrote:", "[email protected] wrote:")
at_name_replace_pattern = re.findall('From:(\s?(?:\w+(?:\s?)){0,5}\(AT\))', s)
if at_name_replace_pattern:
for l in at_name_replace_pattern:
s = s.replace(l, '.'.join(l.replace("(AT)", "").split())+"@at.govt.nz")
from_wrote_pat = [
"wrote:",
"From:",
"[ ]?[-]{1,}[ ]*Original Message[ ]*[-]{1,}[ ]{1,7}From:",
"[ ]?[-]{1,}[ ]*Original message[ ]*[-]{1,}[ ]{1,7}From:",
"[ ]?[-]{1,}[ ]*Original email below[ ]*[-]{1,}[ ]{1,7}From:",
"[ ]?[-]{1,}[ ]*Original Message[ ]*[-]{1,} > From:",
"[ ]?[-]{1,}[ ]*Original message[ ]*[-]{1,} > From:"
]
from_wrote_matchs = re.findall(f'({"|".join(from_wrote_pat)})', s)
from_wrote_matchs = list(set(from_wrote_matchs))
from_wrote_matchs_new = []
for match in from_wrote_matchs:
if "From:" in match:
from_wrote_matchs_new.append(match)
elif "wrote:" in match:
wrote_pat = [
"On\s((?:\S+(?:\s+|$)){,20})wrote:",
"<*[\w.+-]+@[a-zA-Z0-9\-]+\.[\w.-]+>* wrote:"
]
wrote_match = re.findall(f'({"|".join(wrote_pat)})', s)
if wrote_match:
for match in wrote_match:
if len(re.findall('wrote:', wrote_match[0][0])) > 1:
for result in break_multi_wrote(wrote_match[0][0]):
from_wrote_matchs_new.append(result)
else:
from_wrote_matchs_new.append(match[0])
else:
from_wrote_matchs_new.append('wrote:')
else:
print("No From: Wrote: matches found, please check")
from_wrote_matchs_new = [w.replace("(", "\(") for w in from_wrote_matchs_new]
from_wrote_matchs_new = [w.replace(")", "\)") for w in from_wrote_matchs_new]
from_wrote_matchs_new = [w.replace("+", "\+") for w in from_wrote_matchs_new]
from_wrote_matchs_new = [w.replace("[", "\[") for w in from_wrote_matchs_new]
from_wrote_matchs_new = [w.replace("]", "\]") for w in from_wrote_matchs_new]
if len(from_wrote_matchs_new) > 1:
pattern = "|".join(from_wrote_matchs_new)
else:
pattern = from_wrote_matchs_new[0]
split_email = re.split(f"({pattern})", s)
split_email = list(filter(lambda x: x if not x.isspace() else x.strip(), split_email))
split_email = list(filter(None, split_email))
from_emails, to_emails, sub_body_email, owner_label = parse_case(split_email)
customer_content_list = []
staff_content_list = []
if owner_label:
for i in range(len(owner_label)):
if owner_label[i] == "customer" or owner_label[i] == "unknown":
customer_content_list.append(sub_body_email[i])
elif owner_label[i] == "staff":
staff_content_list.append(sub_body_email[i])
return staff_content_list, customer_content_list
else:
return [], [s]
############################## Phone Channel Processing Function ##############################
def phone_channel_pattern_match(text):
pat2 = r'(?:^|\s)\d{1,2}[.]\s{1,}(.*?)(?:\:|\?| -|\d{1,2}[.])'
pat4 = r'Name:(.*?)Phone:(.*?)Location:(.*?)Preferred Method of Contact:'
pat5 = r'From:|wrote:'
pat6 = ["Graphical ID:", "Transaction Date:", "Process Date:", "Refund ID:"]
pat7 = ["Parking - Request for review of Infringement", "Enforcement stationary and abandoned vehicle",
"2122-Parking Request - Vehicle Towing"]
pat2_matchs = re.compile(pat2).findall(text)
pat4_matchs = re.findall(pat4, text)
pat5_matchs = re.findall(pat5, text)
pat6_matchs = re.findall(f"{'|'.join(pat6)}", text)
pat7_matchs = re.findall(f"{'|'.join(pat7)}", text)
return pat2_matchs, pat4_matchs, pat5_matchs, pat6_matchs, pat7_matchs
def remove_question_pat(question, questions, check_answer=False):
s_marker = regex_escape(question)
if questions.index(question)+1 < len(questions):
e_marker = questions[questions.index(question)+1]
e_marker = regex_escape(e_marker)
if not check_answer:
pat = "((?:^|\s)\d{1,2}[.]\s*" + s_marker + ".*?)" + "(?:" + "\d{1,2}[.]\s*" + e_marker + ")"
else:
pat = "(?:^|\s)\d{1,2}[.]\s*" + s_marker + "\?*(.*?)" + "(?:" + "\d{1,2}[.]\s*" + e_marker + ")"
else:
if not check_answer:
pat = "((?:^|\s)\d{1,2}[.]\s*" + s_marker + ".*?)" + "(?:Preferred Method of Contact|$)"
else:
pat = "(?:^|\s)\d{1,2}[.]\s*" + s_marker + "\?*(.*?$)"
return pat
def get_question(question, text):
pat = r'(?:^|\s)\d{1,2}[.]\s{1,}'+ regex_escape(question) + '.*?(?:\:|\?| -)'
if re.findall(pat, text):
return re.findall(pat, text)[0]
else:
return None
def clean_template_question(questions, text):
sentence_to_be_removed = []
sentence_to_be_rephrased = {}
for question in questions:
### Questions to be removed
if question in questions_to_remove_with_answers:
pat = remove_question_pat(question, questions)
if re.compile(pat).findall(text):
sentence_to_be_removed.append(re.compile(pat).findall(text)[0])
else:
print("Else remove question case-", text)
### Questions to be rephrased
if any(question.startswith(ques_) for ques_ in list(questions_to_rephrased.keys())):
matched_question = list(filter(None, [ques_ if question.startswith(ques_) else None for ques_ in list(questions_to_rephrased.keys())]))[0]
pat = remove_question_pat(question, questions, check_answer=True)
if re.compile(pat).findall(text):
ans_to_question = re.compile(pat).findall(text)[0]
ans_to_question = ' '.join(ans_to_question.split())
## Checking if answer is not empty if empty remove the question
if re.findall('\w+', ans_to_question):
if get_question(question, text):
sentence_to_be_rephrased[get_question(question, text)] = questions_to_rephrased[matched_question]
else:
sentence_to_be_rephrased[question] = questions_to_rephrased[matched_question]
else:
pat = remove_question_pat(question, questions)
sentence_to_be_removed.append(re.compile(pat).findall(text)[0])
else:
print("Else rephrase question case-", text)
### Yes or No Questions
if re.findall('^(\s*Is|\s*Are|\s*Has|\s*Does)', question):
pat = remove_question_pat(question, questions, check_answer=True)
if re.compile(pat).findall(text):
ans_to_question = re.compile(pat).findall(text)[0]
try:
## Checking if answer is 'no' if 'no' remove the question with answer
if re.findall('\w+', ans_to_question):
first_word_of_ans = re.findall('\w+', ans_to_question)[0]
if first_word_of_ans.lower() == "no":
if len(re.findall('\w+', ans_to_question)) > 2:
match_yes_or_no_phrase = re.findall('^'+'|'.join(list(yes_or_no_rephrase.keys())), question)[0]
ques_with_ans = re.findall(regex_escape(question)+".*?"+regex_escape(ans_to_question), text)[0]
to_replace_sentence = re.findall(regex_escape(question)+".*?"+regex_escape(first_word_of_ans), ques_with_ans)[0]
reconstructed_sentence = ' ' + ans_to_question.replace(first_word_of_ans, '', 1)
pat = remove_question_pat(question, questions)
if re.compile(pat).findall(text):
sentence_to_be_rephrased[re.compile(pat).findall(text)[0]] = reconstructed_sentence
else:
sentence_to_be_rephrased[ques_with_ans] = reconstructed_sentence
else:
pat = remove_question_pat(question, questions)
sentence_to_be_removed.append(re.compile(pat).findall(text)[0])
if first_word_of_ans.lower() == "yes":
if re.findall('^'+'|'.join(list(yes_or_no_rephrase.keys())), question):
match_yes_or_no_phrase = re.findall('^'+'|'.join(list(yes_or_no_rephrase.keys())), question)[0]
ques_with_ans = re.findall(regex_escape(question)+".*?"+regex_escape(ans_to_question), text)[0]
to_replace_sentence = re.findall(regex_escape(question)+".*?"+regex_escape(first_word_of_ans), ques_with_ans)[0]
reconstructed_sentence = question.replace(match_yes_or_no_phrase, yes_or_no_rephrase[match_yes_or_no_phrase]) + ' ' + ans_to_question.replace(first_word_of_ans, '', 1)
pat = remove_question_pat(question, questions)
if re.compile(pat).findall(text):
#text = text.replace(re.compile(pat).findall(text)[0], reconstructed_sentence)
sentence_to_be_rephrased[re.compile(pat).findall(text)[0]] = reconstructed_sentence
else:
sentence_to_be_rephrased[ques_with_ans] = reconstructed_sentence
#text = text.replace(ques_with_ans, reconstructed_sentence)
else:
pat = remove_question_pat(question, questions)
sentence_to_be_removed.append(re.compile(pat).findall(text)[0])
except:
print("Failed at clean template function-", text)
else:
print("Else Yes or No question case-", text)
if sentence_to_be_removed:
for sent_ in sentence_to_be_removed:
text = text.replace(sent_.strip(), '')
if sentence_to_be_rephrased:
for ques_,sent_ in sentence_to_be_rephrased.items():
text = text.replace(ques_, sent_)
for ques_ in questions:
if ques_ in all_template_questions:
try:
text = re.sub("((\d{1,2}[.]\s{1,}|)"+regex_escape(ques_)+"(\?|:|\?:| -|))", '. ', text)
except:
text = text.replace(ques_, '. ')
text = re.sub('\?', '', text)
return text
def process_phone_channel(text):
pat2_matchs, pat4_matchs, pat5_matchs, pat6_matchs, pat7_matchs = phone_channel_pattern_match(text)
### Dealing with Pattern 1
pat1_matchs = []
for sent_ in list(pat1_rephrase.keys())+personal_other_fields:
if sent_ in text:
pat1_matchs.append(regex_escape(sent_))
if (pat1_matchs and not pat2_matchs) or (len(pat1_matchs) > len(pat2_matchs)):
text = clean_form_type(pat1_matchs, text)
###Dealing with Templating Questions
elif (pat2_matchs and not pat1_matchs) or (len(pat2_matchs) > len(pat1_matchs)):
text = clean_template_question(pat2_matchs, text)
elif pat1_matchs:
text = clean_form_type(pat1_matchs, text)
else:
pass
### Dealing with Pattern 4
if pat4_matchs:
if re.findall("(Name:.*?Phone:.*?)Location:", text):
text = text.replace(re.findall("(Name:.*?Phone:.*?)Location:", text)[0], '')
text = text.replace("Location:", "The location is ")
text = text.replace("Issue:", "The issue is ")
text = text.replace("Road:", "The road is ")
### Dealing with Pattern 5 (Email)
if pat5_matchs:
staff_, customer_ = process_email_channel(text)
return staff_, customer_
if pat6_matchs:
text = re.sub("Transaction Date:\s*\d{1,2}-\d{1,2}-\d{1,4}\s\d{1,2}:\d{1,2}:\d{1,2}", '', text)
text = re.sub("Process Date:\s*\d{1,2}-\d{1,2}-\d{1,4}", '', text)
text = re.sub("Graphical ID:\s*\d{1,22}\s", '', text)
text = re.sub("Refund ID:\s*\d{1,10}\s", '', text)
text = re.sub("Card ID:\s*\d{1,19}\s", '', text)
text = re.sub("((?:^|\s|-)\d{1,21}\s*)(?:OTHER|FORGOT|WRONG_FARE|;|From)", '', text)
tmp = re.findall("FRM\[.*?\]TO\[.*?\]", text)
if tmp:
frm_ = re.findall("FRM\[(.*?)\]TO\[(.*?)\]", tmp[0])[0][0]
to_ = re.findall("FRM\[(.*?)\]TO\[(.*?)\]", tmp[0])[0][1]
text = text.replace(tmp[0], f" From {frm_} to {to_}")
reason_template_words = re.findall("OTHER|FORGOT|WRONG_FARE|LOST_STOLEN_CARD", text)
if reason_template_words:
for word_ in list(set(reason_template_words)):
text = text.replace(word_, f"Because {word_} reason")
text = text.replace("[", ' ')
text = text.replace("]", ' ')
text = text.replace(";", ',')
text = text.strip(',')
if pat7_matchs:
for sent_ in pat7_matchs:
text = text.replace(sent_, '')
text = text.replace(" . ", '. ')
text = text.replace("..", '.')
for sent_ in template_sentences_to_be_removed:
text = text.replace(sent_, '')
staff_content = re.findall('|'.join(AT_template_sentences), text)
if staff_content:
for sent_ in staff_content:
text = text.replace(sent_, '')
customer_content = [text]
return staff_content, customer_content
############################## Web Channel Processing Function ##############################
def clean_form_type(pat_found, text, pat1_rephrase=pat1_rephrase, personal_other_fields=personal_other_fields):
to_be_removed = []
for comb_ in list(permutations(pat_found, len(pat_found))):
pat_ = re.findall('(.*?)'.join(comb_)+'(.*?)$', text)
if pat_:
if isinstance(pat_[0], tuple):
pat_ = pat_[0]
for iter_ in range(len(pat_)):
if regex_escape_reverse(comb_[iter_]) in personal_other_fields:
if regex_escape_reverse(comb_[iter_]) == pat_found[-1]:
to_be_removed.append(comb_[iter_])
else:
to_be_removed.append(comb_[iter_]+regex_escape(pat_[iter_]))
else:
try:
if re.findall('\w+', pat_[iter_]):
if re.findall('\w+', comb_[iter_])[0] in ["Has", "Is", "Are", "Have"]:
first_word_of_ans = re.findall('\w+', pat_[iter_])[0]
if first_word_of_ans.lower() != "no":
if first_word_of_ans.lower() == "yes":
replace_untill = re.findall(f"((?:^|\s){first_word_of_ans}|.+?{first_word_of_ans})", pat_[iter_])[0]
text = re.sub(comb_[iter_]+regex_escape(replace_untill), pat1_rephrase[regex_escape_reverse(comb_[iter_])], text)
else:
text = re.sub(comb_[iter_], pat1_rephrase[regex_escape_reverse(comb_[iter_])], text)
else: #add yes or no question along with answer to remove list
replace_untill = re.findall(f"((?:^|\s){first_word_of_ans}|.+?{first_word_of_ans})", pat_[iter_])[0]
to_be_removed.append(comb_[iter_]+regex_escape(replace_untill))
else:
text = re.sub(comb_[iter_], pat1_rephrase[regex_escape_reverse(comb_[iter_])], text)
else:
to_be_removed.append(comb_[iter_]+regex_escape(pat_[iter_]))
except:
print("Failed at clean_form_type function-", text)
if to_be_removed:
for sent_ in to_be_removed:
try:
text = re.sub(sent_, '', text)
except:
text = text.replace(regex_escape_reverse(sent_), '')
return text
def process_web_channel(text):
### Cleaning Pattern 1
pat1 = ["Graphical ID:", "Transaction Date:", "Process Date:", "Refund ID:",
"FRM\[", "TO\[", "OTHER", "FORGOT", "WRONG_FARE"]
pat1_matchs = re.findall('|'.join(pat1), text)
if len(pat1_matchs) > 2:
text = re.sub("Transaction Date:\s*\d{1,2}-\d{1,2}-\d{1,4}\s\d{1,2}:\d{1,2}:\d{1,2}", '', text)
text = re.sub("Process Date:\s*\d{1,2}-\d{1,2}-\d{1,4}", '', text)
text = re.sub("Graphical ID:\s*\d{1,22}\s", '', text)
text = re.sub("Refund ID:\s*\d{1,10}\s", '', text)
text = re.sub("Card ID:\s*\d{1,19}\s", '', text)
text = re.sub("((?:^|\s|-)\d{1,21}\s*)(?:OTHER|FORGOT|WRONG_FARE|;|From)", '', text)
tmp = re.findall("FRM\[.*?\]TO\[.*?\]", text)
if tmp:
frm_ = re.findall("FRM\[(.*?)\]TO\[(.*?)\]", tmp[0])[0][0]
to_ = re.findall("FRM\[(.*?)\]TO\[(.*?)\]", tmp[0])[0][1]
text = text.replace(tmp[0], f" From {frm_} to {to_}")
reason_template_words = re.findall("OTHER|FORGOT|WRONG_FARE|LOST_STOLEN_CARD", text)
if reason_template_words:
for word_ in list(set(reason_template_words)):
text = text.replace(word_, f"Because {word_} reason")
text = text.replace("[", ' ')
text = text.replace("]", ' ')
text = text.replace(";", ',')
text = text.strip(',')
### Cleaning Pattern 3
pat3_matchs = []
for sent_ in list(pat1_rephrase.keys())+personal_other_fields:
if sent_ in text:
pat3_matchs.append(regex_escape(sent_))
if pat3_matchs:
text = clean_form_type(pat3_matchs, text)
text = text.strip('.')
staff_content = re.findall('|'.join(AT_template_sentences), text)
if staff_content:
for sent_ in staff_content:
text = text.replace(sent_, '')
customer_content = [text]
return staff_content, customer_content
############################## WalkIn Channel Processing Function ##############################
def process_walkin_channel(text):
not_needed_fields = [
"AT HOP card number?:",
"AT HOP card number? :",
"AT HOP card number:",
"Cancelled card:",
"Active card:",
"Card:",
"Account Name:",
"Trip ID:",
"Time of top up / transaction:",
"Mailing preference:",
"Date and time printed:",
"Date and Time:",
"Trip Start Time:",
"Time :",
"Date :",
"Customer's name:"
]
phrase_fields = {
"Location :": ". At ",
"Operator :": ". operator ",
"Note:": '. ',
"Concession:": ". Concession type ",
"Reason for dispute:": ". Dispute is ",
"Amount:": ". Amount charged ",
"Description:": '. ',
"Trip Incident:": '. ',
"Trip Information:": '. Trip information ',
"Description of incident:": ". Incident description ",
"Date of incident:": "Incident date ",
"Time of incident:": "Incident time ",
"Location of incident:": ". Incident location ",
"Depot:": ". depot ",
"Travel Direction:": ". Travel direction is"
}
walkin_matchs = []
for sent_ in list(phrase_fields.keys())+not_needed_fields:
if sent_ in text:
walkin_matchs.append(regex_escape(sent_))
if walkin_matchs:
text = clean_form_type(walkin_matchs, text, pat1_rephrase=phrase_fields, personal_other_fields=not_needed_fields)
return text
############################## Other Channels (Twitter, Webchat, Facebook) Processing Function ############################## | AT-CRM-Cleaning | /AT_CRM_Cleaning-0.0.1.tar.gz/AT_CRM_Cleaning-0.0.1/src/AT_CRM_Cleaning/AT_CRM_Cleaning.py | AT_CRM_Cleaning.py |
import os
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature, FeatureLocation
import click
def gc_content(sequence):
GC = sequence.count("C") + sequence.count("G") + sequence.count("c") + sequence.count("g")
GC_content = (GC / len(sequence)) * 100
return GC_content
def find_at_rich(sequence, cutoff = 0):
regions = {} # Holds sequence of A/T rich regions with location as key
# Walk through sequence finding substrings that match parameters of search
start = 0
end = 1
while end <= len(sequence):
# Store sequence once gc content threshold is reached
if gc_content(sequence[start:end]) > cutoff:
if len(sequence[start:(end-1)]) > 1:
regions[(start+1, end)] = sequence[start:(end-1)]
#regions.append(sequence[start:(end-1)])
start = end # Update search parameter
end += 1
# Catch end sequences which match search parameters
elif end == len(sequence):
if gc_content(sequence[start:end]) <= cutoff:
if len(sequence[start:end]) > 1:
#regions.append(sequence[start:end])
regions[(start+1, end+1)] = sequence[start:end]
end += 1
# Walk on if no catch
else:
end += 1
return regions
@click.command()
@click.argument('file')
@click.option('-gc', '--gc_cutoff', type=float, default = 0, help='Cutoff for GC content of run, default = 0%')
@click.option('-l', '--min_length', type=int, default=2, help='Minimum length for A/T rich region, default = 2 bases.')
def cli(file, gc_cutoff=0, min_length=2):
"""Arguments:\n
FILE The input file in genbank format (.gb).
"""
record = SeqIO.read(file, 'genbank')
sequence = str(record.seq)
regions = find_at_rich(sequence, gc_cutoff)
click.echo('A/T Rich Regions:')
# Iterate through found A/T rich regions and add to genbank file depending on length cutoff
count = 0
for section in regions:
if len(regions[section]) >= min_length:
click.echo('Position: ' + str(section)) # Output A/T region to terminal
click.echo(regions[section])
count += 1
feature = SeqFeature(FeatureLocation(section[0], section[1]), type = 'AT_rich_region')
record.features.append(feature)
click.echo('Found ' + str(count) + ' A/T rich regions.')
new_file_information = record.format('genbank')
# Form output file
file_name = 'gc=' + str(gc_cutoff) + '_minlength=' + str(min_length) + '_' + os.path.basename(file)
base = os.getcwd()
path = os.path.join(base, file_name)
f = open(file_name, 'w')
f.write(new_file_information)
f.close()
click.echo('New File Written: ' + path)
if __name__ == '__main__':
cli() | AT-finder | /AT-finder-0.0.1.tar.gz/AT-finder-0.0.1/atFinder/atFinder.py | atFinder.py |
# ATACFragQC
The Python toolkits designed to control the fragment quality of Bulk/SingCell ATAC-seq.
## Installation
~~~
python3 -m pip install --upgrade ATACFragQC
~~~
## Usage
~~~
# Basic usage
ATACFragQC [options] -i <input.bam> -r <reference.gtf>
# For more information
ATACFragQC -h
~~~
## Features
* The distrubution of fragments in chromosomes
* The distrubution of fragment lengths
* The distrubution of fragments around transcription start sites (TSSs)
* Other feature would be supported in the future ...
## Overview

| ATACFragQC | /ATACFragQC-0.4.7.tar.gz/ATACFragQC-0.4.7/README.md | README.md |
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from _ATAX.accounting import *
from _ATAX.accounting import _Base_, _Entry_
from _TFL.pyk import pyk
from _TFL.Regexp import *
import _TFL.CAO
import _TFL.r_eval
class _Mixin_ (TFL.Meta.Object) :
def _setup_dates (self, target_year) :
self.head_date = "1.1.%s" % (target_year, )
self.midd_date = "30.6.%s" % (target_year, )
self.tail_date = "31.12.%s" % (target_year, )
self.head_time = Date (self.head_date)
self.midd_time = Date (self.midd_date)
self.tail_time = Date (self.tail_date)
self.target_year = int (target_year)
# end def _setup_dates
# end class _Mixin_
@pyk.adapt__bool__
class _IFB_ (TFL.Meta.Object) :
"""Base class for FBiG and IFB."""
def __init__ (self, entry) :
self.entry = entry
self.alive = entry.birth_time.year + 4 > entry.target_year
self.is_new = entry.birth_time.year == entry.target_year
# end def __init__
def round (self) :
self.value = self.value.rounded_as_target ()
# end def round
def __bool__ (self) :
return self.alive and bool (self.value)
# end def __bool__
# end class _IFB_
class FBiG (_IFB_) :
"""Model a FBiG (Freibetrag für investierte Gewinne)."""
abbr = "FBiG"
account = None
name = "Freibetrag für investierte Gewinne"
def __init__ (self, entry, ifb, source_currency) :
self.__super.__init__ (entry)
self.value = source_currency (float (ifb or entry.birth_value))
# end def __init__
# end class FBiG
class IFB (_IFB_) :
"""Model a IFB (Investitionsfreibetrag)."""
abbr = "IFB"
account = 7802
name = "Investitionsfreibetrag"
def __init__ (self, entry, ifb, source_currency) :
self.__super.__init__ (entry)
self.rate = int (ifb or 0) / 100.0
self.value = entry.birth_value * self.rate
# end def __init__
# end class IFB
class Anlagen_Entry (_Mixin_, _Entry_) :
cat = "Normal"
rate_pattern = r"(?P<rate> [-+*/().0-9\s]+)"
first_rate_pat = Regexp (rate_pattern, re.X)
later_rate_pat = Regexp \
( r"(?P<year> \d\d (?: \d\d)?) \s* : \s* " + rate_pattern
, re.X
)
_cat_pat = Regexp (r"C\[(?P<cat> [^]]+)\]", re.VERBOSE)
def __init__ (self, line, anlagenverzeichnis) :
try :
( self.desc, self.supplier, self.flags
, self.birth_date, self.a_value, self.afa_spec, ifb
, self.death_date
) = split_pat.split (line, 8)
except ValueError as exc :
print (line)
raise
final = "31.12.2037"
self.p_konto = self._get_p_konto (self.flags)
self.birth_time = Date (self.birth_date)
self.death_time = Date (self.death_date or final)
self.alive = self.death_time > anlagenverzeichnis.tail_time
self.contemporary = \
( self.birth_time <= anlagenverzeichnis.tail_time
and self.death_time >= anlagenverzeichnis.head_time
)
if int (self.death_time.year) < int (anlagenverzeichnis.year) :
self._setup_dates (self.death_time.year)
else :
self._setup_dates (anlagenverzeichnis.year)
self.half_date = "1.7.%s" % (self.birth_time.year, )
if "~" in self.flags :
self.half_date = "1.1.%s" % (self.birth_time.year + 1, )
self.half_time = Date (self.half_date)
self.desc = desc_strip_pat.sub ("", self.desc)
currency_match = currency_pat.search (self.a_value)
a_value = self.a_value
source_currency = anlagenverzeichnis.source_currency
if currency_match :
source_currency = EU_Currency.Table [currency_match.group (1)]
a_value = currency_pat.sub ("", a_value)
if EUC.target_currency is not ATS :
self.zero = source_currency (0.0)
else :
self.zero = source_currency (1.0)
self.source_currency = source_currency
self.birth_value = source_currency (TFL.r_eval (a_value))
self.new_value = source_currency (0.0)
self.out_value = source_currency (0.0)
if "G" in self.flags :
self.ifb = FBiG (self, ifb, source_currency)
else :
self.ifb = IFB (self, ifb, source_currency)
self._set_cat (self.flags)
# end def __init__
@property
def active (self) :
return \
( self.contemporary
and (self.current_depreciation > 0 or self.base_rate == 0)
)
# end def active
def evaluate (self) :
self._calc_rates ()
self.current_depreciation = \
self.birth_value * (self.current_rate / 100.0)
if "=" not in self.flags :
self.head_value = max \
( self.birth_value * ((100.0 - self.past_total_rate) / 100.)
, self.zero
)
self.tail_value = self.head_value - self.current_depreciation
if self.tail_value < self.zero :
self.tail_value = self.zero
self.current_depreciation -= self.zero
else :
self.head_value = self.tail_value = self.birth_value
if self.birth_time >= self.head_time :
self.head_value = self.source_currency (0.0)
self.new_value = self.birth_value
if not self.alive :
self.out_value = self.tail_value
self.tail_value = self.source_currency (0.0)
if self.tail_value.target_currency.to_euro_factor != 1.0 :
self.birth_value = self.birth_value.rounded_as_target ()
self.head_value = self.head_value.rounded_as_target ()
self.tail_value = self.tail_value.rounded_as_target ()
self.new_value = self.new_value.rounded_as_target ()
self.out_value = self.out_value.rounded_as_target ()
self.current_depreciation = \
self.current_depreciation.rounded_as_target ()
if self.ifb :
self.ifb.round ()
# end def evaluate
def _calc_rates (self) :
rates = [x.strip () for x in self.afa_spec.split (",")]
first_rate = rates [0]
first_rate_pat = self.first_rate_pat
later_rate_pat = self.later_rate_pat
if not first_rate_pat.match (first_rate) :
raise ValueError \
("%s doesn't match a depreciation rate" % (first_rate, ))
later_rates = []
for r in rates [1:] :
if not later_rate_pat.match (r) :
raise ValueError \
("%s doesn't match a depreciation rate" % (r, ))
y = Time_Tuple (later_rate_pat.year).year
later_rates.append ((y, TFL.r_eval (later_rate_pat.rate) * 1.0))
y_rate = self.base_rate = TFL.r_eval (first_rate_pat.rate) * 1.0
if later_rates :
later_rates.append ((self.target_year, later_rates [-1] [1]))
else :
later_rates.append ((self.target_year, y_rate))
y_rates = self.y_rates = \
[y_rate * ((0.5, 1.0) [self.birth_time < self.half_time])]
if self.birth_time < self.head_time :
current_year = self.birth_time.year + 1
for target_year, next_rate in later_rates :
while current_year < target_year :
y_rates.append (y_rate)
current_year += 1
y_rate = self.base_rate = next_rate
y_rates.append \
(y_rate * ((0.5, 1.0) [self.midd_time < self.death_time]))
self.current_rate = y_rates [-1]
past_total_rate = 0
for y_rate in y_rates [:-1] :
past_total_rate += y_rate
self.past_total_rate = min (past_total_rate, 100.0)
if self.past_total_rate + self.current_rate > 100.0 :
self.current_rate = 100.0 - self.past_total_rate
# end def _calc_rates
def _set_cat (self, flags) :
pat = self._cat_pat
if pat.search (flags) :
self.cat = pat.cat
# end def _set_cat
# end class Anlagen_Entry
class Anlagenverzeichnis (_Mixin_, _Base_) :
assignment_pat = Regexp \
( r"^\$ "
r"(?P<var> account_file | source_currency)"
r"\s* = \s*"
r"(?P<value> .*)"
r"\s* ;"
, re.X
)
header_format = "%-48s %-8s %10s %10s %8s %10s %10s"
entry1_format = "%-44s%4s %8s %10.2f %10.2f %5.2f %10.2f %10.2f"
newifb_format = " %-46s %8s %10s %10s %8s %10.2f %10s"
alive_format = " %-46s %8s %10s %10s %8s"
dying_format = " %-36.31s%10s %8s %10s %10s %8s %10.2f %10s"
footer_format = "\n%-48s %8s %10.2f %10.2f %8s %10.2f %10.2f"
new_format = "%-48s %8s %10s %10.2f"
out_format = "%-48s %8s %10s %10s %8s %10.2f"
account_format = \
" %s & & & %10.2f & b & %-5s & 2100 & - & %-3s & & %-6s %s\n"
ifb_type = ""
def __init__ (self, year, start_year, file_name, source_currency, account_file = None) :
self.year = year
self.start_time = Date ("1.1.%s" % (start_year, ))
self.file_name = file_name
self.source_currency = source_currency
self.account_file = account_file
self.entries = []
self.total_birth_value = source_currency (0.0)
self.total_head_value = source_currency (0.0)
self.total_tail_value = source_currency (0.0)
self.total_new_value = source_currency (0.0)
self.total_out_value = source_currency (0.0)
self.total_ifb_value = source_currency (0.0)
self.total_depreciation = source_currency (0.0)
self.total_per_cat = defaultdict (EUR)
self._setup_dates (year)
self.add_file (file_name)
# end def __init__
def add_file (self, file_name) :
assignment_pat = self.assignment_pat
file = open (file_name, "rb")
for line in file :
line = self._decoded (line)
if ignor_pat.match (line) : continue
line = ws_head_pat.sub ("", line, count = 1)
line = ws_tail_pat.sub ("", line, count = 1)
if not line : continue
if assignment_pat.match (line) :
self.eval_line (line, assignment_pat)
else :
self.add_line (line)
file.close ()
# end def add_file
def eval_line (self, line, match) :
name = match.var
expression = match.value.replace \
("$target_year", str (self.target_year))
value = TFL.r_eval (expression)
if name == "source_currency" :
value = EUC.Table [value]
setattr (self, name, value)
# end def eval_line
def add_line (self, line) :
self.entries.append (Anlagen_Entry (line, self))
# end def add_line
def evaluate (self) :
for e in self.entries :
if (not e.contemporary) or e.birth_time < self.start_time :
e.contemporary = 0
continue
e.evaluate ()
if e.active :
self.total_birth_value += e.birth_value
self.total_head_value += e.head_value
self.total_tail_value += e.tail_value
self.total_new_value += e.new_value
self.total_out_value += e.out_value
self.total_depreciation += e.current_depreciation
self.total_per_cat [e.cat] += e.current_depreciation
if e.ifb and e.ifb.is_new :
self.ifb_type = e.ifb.__class__
self.total_ifb_value += e.ifb.value
# end def evaluate
def write (self) :
pyk.fprint \
( self.header_format
% ( "", "", "Anschaff/", "Buchwert", " Afa ", "Afa", "Buchwert")
)
pyk.fprint \
( self.header_format
% ( "Text", "Datum", "Teil-Wert", "1.1.", " % "
, "IFB/Abgang", "31.12."
)
)
pyk.fprint ("\n%s\n" % ("=" * 116, ))
for e in self.entries :
if e.active :
self._write_entry (e)
pyk.fprint ("\n%s\n" % ("=" * 116, ))
pyk.fprint \
( self.footer_format
% ( "Summe", ""
, self.total_birth_value
, self.total_head_value
, "Afa"
, self.total_depreciation
, self.total_tail_value
)
)
if len (self.total_per_cat) > 1 :
for k, v in sorted (pyk.iteritems (self.total_per_cat)) :
pyk.fprint ((self.out_format % ("", "", "", "", k, v)))
pyk.fprint \
(self.new_format % ("Neuzugänge", "", "", self.total_new_value))
pyk.fprint \
( self.out_format
% ("Abgänge", "", "", "", "", self.total_out_value)
)
if self.total_ifb_value :
pyk.fprint \
( self.out_format
% ( self.ifb_type.name, "", "", "", self.ifb_type.abbr
, self.total_ifb_value
)
)
# end def write
def _write_entry (self, e) :
ifb_indicator = ""
if e.ifb :
ifb_indicator = e.ifb.abbr
pyk.fprint \
( self.entry1_format
% ( e.desc
, ifb_indicator
, e.birth_time.formatted ("%d.%m.%y")
, e.birth_value
, e.head_value
, e.base_rate
, e.current_depreciation
, e.tail_value
)
)
if e.alive :
if e.ifb and e.ifb.is_new :
pyk.fprint \
( self.newifb_format
% ( e.supplier, "", "", "", e.ifb.abbr, e.ifb.value, "")
)
elif e.ifb :
pyk.fprint (" %-36s%10.2f" % (e.supplier, e.ifb.value))
else :
pyk.fprint \
( self.alive_format
% (e.supplier, "", "", "", ("", "ewig") ["=" in e.flags])
)
else :
pyk.fprint \
( self.dying_format
% ( e.supplier
, "Abgang"
, e.death_time.formatted ("%d.%m.%y")
, ifb_indicator
, ("", e.ifb.value.as_string_s ()) [bool (e.ifb)]
, ("", "ewig") ["=" in e.flags]
, e.out_value
, ""
)
)
# end def _write_entry
def update_accounts (self) :
if self.account_file :
file = open (self.account_file, "w")
else :
file = sys.stdout
for e in self.entries :
if e.contemporary :
self._update_account_entry (e, file)
if self.account_file :
file.close ()
# end def update_accounts
def _update_account_entry (self, e, file) :
cat = "fe"
if e.p_konto :
cat = "%sP[%s]" % (cat, e.p_konto)
eoy = Date (day_to_time_tuple ("31.12."))
if e.active and e.current_depreciation :
self._write \
( file
, self.account_format
% ( eoy.formatted ("%d.%m.")
, e.current_depreciation, 7800, cat, "Afa", e.desc
)
)
if e.ifb and e.ifb.is_new and e.ifb.account :
self._write \
( file
, self.account_format
% (eoy.formatted ("%d.%m.")
, e.ifb.value, e.ifb.account, cat, e.ifb.abbr, e.desc
)
)
if not e.alive :
self._write \
( file
, self.account_format
% ( e.death_time.formatted ("%d.%m.")
, e.out_value, 7801, cat, "Abgang", e.desc
)
)
# end def _update_account_entry
def _write (self, file, s) :
file.write (pyk.as_str (s))
# end def _write
# end class Anlagenverzeichnis
def _main (cmd) :
source_currency = cmd.source_currency
year = cmd.year
start = cmd.Start_year
file_name = cmd.anlagenverzeichnis
account_file = cmd.account_file
anlagenverzeichnis = Anlagenverzeichnis \
(year, start, file_name, source_currency, account_file)
anlagenverzeichnis.evaluate ()
anlagenverzeichnis.write ()
if cmd.update_accounts :
anlagenverzeichnis.update_accounts ()
return anlagenverzeichnis
# end def _main
_Command = TFL.CAO.Cmd \
( handler = _main
, args =
( "year:S?Year of interest"
, "anlagenverzeichnis:P?File defining depreciation data"
)
, min_args = 2
, max_args = 2
, opts =
( "-account_file:P?Name of account file to update"
, "-Start_year:S=1988?Skip all entries before `Start_year`"
, "-update_accounts:B?Add depreciation entries to account file"
, TFL.CAO.Arg.EUC_Source ()
, TFL.CAO.Arg.EUC_Target ()
, TFL.CAO.Opt.Output_Encoding (default = "utf-8")
)
, description = "Calculate depreciations for `year`"
)
"""
year=2007 ; python -m ATAX.anlagenverzeichnis $year ~/EAR/anlagen_gewerbe.dat
"""
if __name__ == "__main__":
_Command ()
### __END__ ATAX.anlagenverzeichnis | ATAX | /ATAX-1.1.3.tar.gz/ATAX-1.1.3/anlagenverzeichnis.py | anlagenverzeichnis.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from _TFL.Date_Time import *
from _TFL.EU_Currency import *
from _TFL.pyk import pyk
import _TFL.CAO
year = Date ().year
def tax_brackets (year) :
if year < 2000 :
return \
( (ATS ( 50000), 0.10)
, (ATS ( 100000), 0.22)
, (ATS ( 150000), 0.32)
, (ATS ( 400000), 0.42)
, (ATS (2000000000), 0.50)
)
elif year < 2002 :
return \
( (ATS ( 50000), 0.00)
, (ATS ( 50000), 0.21)
, (ATS ( 200000), 0.31)
, (ATS ( 400000), 0.41)
, (ATS (2000000000), 0.50)
)
elif year < 2005 :
return \
( (EUR ( 3640), 0.00)
, (EUR ( 3630), 0.21)
, (EUR ( 14530), 0.31)
, (EUR ( 29070), 0.41)
, (EUR (2000000000), 0.50)
)
elif year < 2009 :
return \
( (EUR ( 10000), 0.00)
, (EUR ( 15000), 0.3833)
, (EUR ( 26000), 0.436)
, (EUR (2000000000), 0.50)
)
elif year < 2016 :
return \
( (EUR ( 11000), 0.000)
, (EUR ( 14000), 0.3635)
, (EUR ( 35000), 0.4321429)
, (EUR (2000000000), 0.500)
)
else :
return \
( (EUR ( 11000), 0.000)
, (EUR ( 7000), 0.250)
, (EUR ( 13000), 0.350)
, (EUR ( 29000), 0.420)
, (EUR ( 30000), 0.480)
, (EUR ( 910000), 0.500)
, (EUR (2000000000), 0.550) ### as of 2015, applies up to 2020
)
# end def tax_brackets
def tax (amount, year = year) :
result = 0
brackets = tax_brackets (year)
remains = amount
tax_chunks = []
for chunk, rate in brackets :
if remains <= 0.0 :
break
if chunk > remains :
chunk = remains
### don't use `-=` here (or `chunk = remains` fails)
remains = remains - chunk
tc = chunk * rate
result += tc
tax_chunks.append ((chunk, rate, tc))
return result, tax_chunks
# end def tax
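### Worked example (using the 2016+ brackets above): for a taxable income of
### EUR 20000, `tax` consumes the brackets as
### 11000 * 0.00 + 7000 * 0.25 + 2000 * 0.35 = 0 + 1750 + 700
### and returns a total tax of EUR 2450 together with the list of
### (chunk, rate, tax) triples used for the `-verbose` output.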
def _main (cmd) :
source_currency = cmd.source_currency
year = cmd.year
amount = cmd.amount
tax_amount, tax_chunks = tax (amount, year)
if cmd.verbose :
for c, r, t in tax_chunks :
print \
( "%2d%% for %14s : %14s"
% (r * 100, c.as_string_s (), t.as_string_s ())
)
f = ( "In %s, for a taxable income of %s [%s]\n"
" you pay a tax of %s (%5.2f%%) and get %s\n"
)
print \
( f
% ( year, amount, cmd ["amount:raw"], tax_amount
, (tax_amount / (amount / 100.0)).amount
, amount - tax_amount
)
)
# end def _main
_Command = TFL.CAO.Cmd \
( handler = _main
, args =
( TFL.CAO.Arg.EUC
( name = "amount"
, description = "Amount of taxable income"
)
,
)
, min_args = 1
, max_args = 1
, opts =
( "-verbose:B?Show chunks, too"
, "-year:I=%s?Year of interest" % (year, )
, TFL.CAO.Opt.EUC_Source ()
, TFL.CAO.Opt.EUC_Target ()
)
, description = "Calculate income tax for `year`"
)
if __name__ == "__main__" :
_Command ()
### __END__ ATAX.income_tax_at | ATAX | /ATAX-1.1.3.tar.gz/ATAX-1.1.3/income_tax_at.py | income_tax_at.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from _ATAX import ATAX
from _TFL import TFL
from _TFL.Date_Time import *
from _TFL.EU_Currency import *
from _TFL.defaultdict import defaultdict
from _TFL.predicate import *
from _TFL.pyk import pyk
from _TFL.Regexp import *
import _TFL._Meta.Object
import _TFL.Accessor
import _TFL.CAO
import _TFL.r_eval
import _TFL.load_config_file
import math
import sys
import glob
ignor_pat = Regexp ( r"^\s*[«%#]")
empty_pat = Regexp ( r"^\s*$")
ws_head_pat = Regexp ( r"^\s*")
ws_tail_pat = Regexp ( r"\s*\n?$")
code_pat = Regexp ( r"^\s*\$")
perl_dict_pat = Regexp ( r"""\{\s*"?(\s*\d+\s*)"?\s*\}""")
split_pat = Regexp ( r"\s*&\s*")
currency_pat = Regexp ( r"([A-Za-z]+)$")
desc_strip_pat = Regexp ( r"\s*&\s*$")
def underlined (text) :
bu = "\b_"
return bu.join (text) + bu
# end def underlined
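### `underlined` uses the old overstrike convention: every character is
### followed by backspace + underscore, e.g. underlined ("Abc") yields
### "A\b_b\b_c\b_", which pagers and line printers render as underlined text.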
class _Base_ (TFL.Meta.Object) :
def _decoded (self, line) :
try :
result = line.decode ("utf-8")
except Exception as exc :
result = line.decode ("iso-8859-1")
return result
# end def _decoded
# end class _Base_
class _Entry_ (_Base_) :
_p_konto_pat = Regexp (r"P\[(?P<p_konto> \d+)\]", re.VERBOSE)
def _get_p_konto (self, flags) :
pat = self._p_konto_pat
if pat.search (flags) :
return pat.p_konto
# end def _get_p_konto
# end class _Entry_
class Account_Entry (_Entry_) :
"""Entry of accounting file."""
"""Cat (several letters can be combined)::
e include in income tax calculations
f financial planning
i Innergemeinschaftlicher Erwerb
k correction
r reverse charge (for VAT, Article 19 [057/066 in VAT form])
s storno
u include in VAT calculations
z pure VAT amount (Einfuhrumsatzsteuer)
g_or_n (exactly one letter)::
b gross amount
f VAT free
n net amount
v pure VAT amount
"""
### The following class variables can be overridden in a config file
### (e.g., ATAX.config)
erloes_minderung_pat = Regexp ( r"Legacy: use `~` instead")
ausgaben_minderung_pat = Regexp ( r"Legacy: use `~` instead")
minderung = False
def __init__ (self, line, source_currency, vst_korrektur = 1.0) :
self.line = line
try :
( self.date, self.number, self.vat_txt, gross, self.g_or_n
, self.soll, self.haben, self.dir, self.cat, self.plan_kat
, desc
) = split_pat.split (line, 10)
self.dtuple = day_to_time_tuple (self.date)
except (ValueError, TypeError) as exc :
pyk.fprint (exc)
pyk.fprint (line)
raise
self.desc = desc_strip_pat.sub ("", desc)
self.vst_korrektur = vst_korrektur
self.time = mktime (tuple (self.dtuple))
self.p_konto = self._get_p_konto (self.cat)
self.vat_type = ' '
for k in 'ir' :
if k in self.cat :
self.vat_type = k
break
currency_match = currency_pat.search (gross)
if currency_match :
source_currency = EUR.Table [currency_match.group (1)]
gross = currency_pat.sub ("", gross)
### avoid integer truncation by division
### otherwise `1/2' evaluates to `0.0' :-(
gross = gross.replace ("/", "*1.0/")
###
self.gross = source_currency (TFL.r_eval (gross))
self.netto = source_currency (self.gross)
if "s" in self.cat :
self.gross = - self.gross
self.netto = - self.netto
self.flag = "S"
elif "k" in self.cat :
self.flag = "k"
else :
self.flag = " "
self.vat_p = (TFL.r_eval (self.vat_txt or "0") / 100.0) + 1.0
if "n" == self.g_or_n :
self.gross = self.netto * self.vat_p
elif "b" == self.g_or_n :
self.netto = self.gross / self.vat_p
elif "v" == self.g_or_n :
### entry is pure VAT amount (Einfuhr-Umsatzsteuer)
self.netto = source_currency (0)
self.vat = self.gross - self.netto
self.vat_vstk = source_currency (0)
self.vat_x = source_currency (0)
self.is_change = 1
self.minderung = "~" in self.dir
if "-" in self.dir :
self.soll_betrag = self.netto
self.haben_betrag = source_currency (0)
self.minderung = \
self.minderung or self.erloes_minderung_pat.match (self.haben)
if not self.minderung :
self.konto = self.soll
self.gegen_konto = self.haben
if self.vat_p != 1 :
self.vat_vstk = self.vat
self.vat = self.vat * vst_korrektur
self.vat_vstk -= self.vat
self.vat_x = self.vat
else :
self.konto = self.haben
self.gegen_konto = self.soll
elif "+" in self.dir :
self.haben_betrag = self.netto
self.soll_betrag = source_currency (0)
self.minderung = \
( self.minderung
or self.ausgaben_minderung_pat.match (self.haben)
)
if not self.minderung :
self.konto = self.haben
self.gegen_konto = self.soll
else :
### In many cases, it is better to book dir `-` with a
### negative amount instead
###
### XXX VAT should be handled properly here: compare `-`
self.konto = self.soll
self.gegen_konto = self.haben
else :
self.is_change = 0
self.cati = " "
# end def __init__
def __getattr__ (self, name) :
if name.startswith ("__") and name.endswith ("__") :
### Placate inspect.unwrap of Python 3.5,
### which accesses `__wrapped__` and eventually throws `ValueError`
return getattr (self.__super, name)
return getattr (self.dtuple, name)
# end def __getattr__
def __str__ (self) :
return "%6s %2.2f%s%10s %10s %10s %-5s %-5s%2s%1s %s" % \
( self.date, self.vat_p, self.cati
, self.vat.as_string ()
, self.netto.as_string ()
, self.gross.as_string ()
, self.soll, self.haben, self.dir
, self.flag, self.desc [:18]
)
# end def __str__
def kontenzeile (self) :
## print self.vat_p, type (self.vat_p) # self.soll_betrag, self.haben_betrag
try :
vat_type = getattr (self, "vat_type", " ")
return "%02d%02d %-5s %-35.35s %2s%1s %12s %12s" % \
( self.day, self.month
, self.gegen_konto
, self.desc
, self.vat_txt
, vat_type
, self._soll_betrag ()
, self._haben_betrag ()
)
except Exception as exc :
pyk.fprint \
( exc
, type (self.soll_betrag), self.soll_betrag
, type (self.haben_betrag), self.haben_betrag
)
### raise
# end def kontenzeile
def _soll_betrag (self) :
if self.soll_betrag != 0 or "-" in self.dir :
return self.soll_betrag.as_string_s ()
else :
### if "+" not in self.dir : return "<< %s >>" % self.dir
return ""
# end def _soll_betrag
def _haben_betrag (self) :
if self.haben_betrag != 0 or "+" in self.dir :
return self.haben_betrag.as_string_s ()
else :
### if "-" not in self.dir : return "<< %s >>" % self.dir
return ""
# end def _haben_betrag
def _init (self, soll_betrag, haben_betrag, gegen_konto = "3293", konto = "") :
self.konto = konto
self.soll_betrag = EUR (soll_betrag)
self.haben_betrag = EUR (haben_betrag)
self.dtuple = day_to_time_tuple (self.date)
self.gegen_konto = gegen_konto
self.vat_txt = self.vat_p = self.cat_i = ""
self.soll = self.haben = self.flag = self.dir = ""
self.gross = self.netto = self.vat = EUR (0)
# end def _init
# end class Account_Entry
class Ust_Gegenbuchung (Account_Entry) :
def __init__ (self, month, konto, soll_betrag, haben_betrag, desc) :
self.date = "%s.%s" % (Time_Tuple.day_per_month [month - 1], month)
self._init (soll_betrag, haben_betrag, konto = konto)
self.desc = "%-12s %02d" % (desc, self.dtuple.month)
# end def __init__
# end class Ust_Gegenbuchung
class Privatanteil (Account_Entry) :
def __init__ (self, gegen_konto, soll_betrag, haben_betrag, desc, konto = "") :
self.date = "31.12."
self._init (soll_betrag, haben_betrag, gegen_konto, konto)
self.desc = "%-12s" % (desc, )
# end def __init__
# end class Privatanteil
class Account (_Base_):
"""Account for a specific time interval (e.g., month, quarter, or year)"""
Entry = Account_Entry
### The following class variables can be overridden in a config file
### (e.g., ATAX.config)
ignore = ()
gewerbe_anteil = 0
privat = {}
vat_privat = {} ### VAT rate per account in `.privat`
vat_privat_default = 1.20 ### default VAT rate applicable for private part
def __init__ (self, name = "", vst_korrektur = 1.0) :
self.name = name
self.vst_korrektur = vst_korrektur
self.entries = []
self.privat = dict (self.privat)
self.vat_privat = dict (self.vat_privat)
self.ignore = set (self.__class__.ignore)
self._finished = False
# end def __init__
account_vars = set (("vst_korrektur", "firma", "gewerbe_anteil"))
def add_file (self, file_name, categ_interest, source_currency) :
file = open (file_name, "rb")
try :
self.add_lines (file, categ_interest, source_currency)
finally :
file.close ()
# end def add_file
def add_lines (self, lines, categ_interest, source_currency) :
"""Add entries given by `lines' to account `self'."""
self.source_currency = source_currency
entries = []
for line in lines :
line = self._decoded (line)
if ignor_pat.match (line) or empty_pat.match (line) :
continue
line = ws_head_pat.sub ("", line, count = 1)
line = ws_tail_pat.sub ("", line, count = 1)
if not line : continue
if code_pat.match (line) :
self.eval_line (line)
else :
entry = self.Entry \
(line, self.source_currency, self.vst_korrektur)
if (categ_interest.search (entry.cat) and entry.is_change) :
entries.append (entry)
entries.sort (key = TFL.Getter.time)
self.entries.extend (entries)
for entry in entries :
self.add (entry)
# end def add_lines
def eval_line (self, line) :
line = code_pat.sub ("", line, count = 1)
if perl_dict_pat.search (line) :
line = perl_dict_pat.sub ("""["\\1"]""", line)
### use temporary dict for exec
### (`locals ()' cannot be changed by `exec')
tmp = { "privat" : {}, "vat_privat" : {}, "ignore" : {}}
try :
### `tmp' must be passed to the local-dict argument because
### Python adds some junk to the global-dict argument of `exec'
exec (line, {}, tmp)
if "source_currency" in tmp :
self.source_currency = EUC.Table [tmp ["source_currency"]]
del tmp ["source_currency"]
self.privat.update (tmp ["privat"])
self.vat_privat.update (tmp ["vat_privat"])
self.ignore.update (tmp ["ignore"])
del tmp ["privat"]
del tmp ["vat_privat"]
del tmp ["ignore"]
try :
for k, v in tmp.items () :
if k in self.account_vars :
setattr (self, k, v)
else :
pyk.fprint ("Invalid assignment `%s'" % line)
except Exception as exc :
pyk.fprint \
( "Exception `%s' during local-dict update `%s'"
% (exc, tmp)
)
except Exception as exc :
pyk.fprint \
( "Exception `%s' encountered during execution of line"
"\n `%s'"
% (exc, line)
)
# end def eval_line
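### Illustrative settings line: a data line like
### $ privat{7300} = 40
### is rewritten by `perl_dict_pat` to `privat["7300"] = 40` and executed,
### marking 40% of account 7300 as private share; `source_currency`,
### `vat_privat`, `ignore`, and the names listed in `account_vars` can be
### set the same way.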
# end class Account
class V_Account (Account) :
"""VAT Account for a specific time interval (e.g., month, quarter, or year)"""
### The following class variables can be overridden in a config file
### (e.g., ATAX.config)
kz_add = \
{ "????" : ("027", "In 060/065 enthaltene Vorsteuern betreffend KFZ")
}
def __init__ (self, name = "", vst_korrektur = 1.0) :
Account.__init__ (self, name, vst_korrektur)
self.ausgaben_b = EUR (0)
self.ausgaben_n = EUR (0)
self.erwerb_igE = EUR (0)
self.reverse_charge = EUR (0)
self.umsatz = EUR (0)
self.umsatz_frei = EUR (0)
self.ust = EUR (0)
self.ust_igE = EUR (0)
self.ust_revCharge = EUR (0)
self.vorsteuer = EUR (0)
self.ust_gut = EUR (0)
self.vorsteuer_igE = EUR (0)
self.vorsteuer_revCh = EUR (0)
self.vorsteuer_EUst = EUR (0)
self.erwerb_igE_dict = defaultdict (EUR)
self.reverse_charge_dict = defaultdict (EUR)
self.umsatz_dict = defaultdict (EUR)
self.ust_dict = defaultdict (EUR)
self.ust_igE_dict = defaultdict (EUR)
self.ust_revC_dict = defaultdict (EUR)
self.vorsteuer_kzs = defaultdict (EUR)
self.umsatzsteuer_entries = []
self.vorsteuer_entries = []
self.vorsteuer_entries_igE = []
self.vorsteuer_entries_revC = []
self.vorsteuer_entries_EUst = []
# end def __init__
def add (self, entry) :
"""Add `entry' to U_Account."""
assert (isinstance (entry, Account_Entry))
vst_korrektur = entry.vst_korrektur
netto = entry.netto
gross = entry.gross
vat = entry.vat
vat_p = entry.vat_p
if "-" in entry.dir :
self.ausgaben_b += gross
self.ausgaben_n += netto
if "i" in entry.cat :
self.vorsteuer_igE += vat
self.ust_igE += vat
self.vorsteuer_entries_igE.append (entry)
entry.cati = "i"
self.ust_igE_dict [vat_p] += vat
self.erwerb_igE += netto
self.erwerb_igE_dict [vat_p] += netto
if vst_korrektur != 1.0 :
sys.stderr.write \
( "Cannot handle expenses resulting from "
"innergemeinschaftlichem Erwerb for a "
"VAT correction of %d\n %s\n"
% (vst_korrektur, entry)
)
elif "b" == entry.g_or_n :
sys.stderr.write \
( "**** igE entries must be netto****\n %s\n"
% entry
)
if entry.konto in self.kz_add :
self.vorsteuer_kzs [self.kz_add [entry.konto]] += vat
elif "r" in entry.cat :
self.vorsteuer_revCh += vat
self.ust_revCharge += vat
self.vorsteuer_entries_revC.append (entry)
entry.cati = "r"
self.ust_revC_dict [vat_p] += vat
self.reverse_charge += netto
self.reverse_charge_dict [vat_p] += netto
if vst_korrektur != 1.0 :
sys.stderr.write \
( "Cannot handle expenses resulting from "
"reverse Charge for a "
"VAT correction of %d\n %s\n"
% (vst_korrektur, entry)
)
elif "b" == entry.g_or_n :
sys.stderr.write \
( "**** reverse Charge entries must be netto****\n"
" %s\n"
% entry
)
if entry.konto in self.kz_add :
self.vorsteuer_kzs [self.kz_add [entry.konto]] += vat
elif "z" in entry.cat :
self.vorsteuer_EUst += vat
self.vorsteuer_entries_EUst.append (entry)
entry.cati = "E"
if vst_korrektur != 1.0 :
sys.stderr.write \
( "Cannot handle Einfuhrumsatzsteuer for a "
"VAT correction of %d\n %s\n"
% (vst_korrektur, entry)
)
else : ### neither "i" nor "z" nor "r"
self.vorsteuer_entries.append (entry)
if entry.minderung :
## Erlösminderung instead of Ausgabe
self._add_umsatz (- netto, - vat, vat_p, entry)
else :
self._add_vorsteuer (vat)
if entry.konto in self.kz_add :
self.vorsteuer_kzs [self.kz_add [entry.konto]] += vat
elif "+" in entry.dir :
self.umsatzsteuer_entries.append (entry)
if "i" in entry.cat :
sys.stderr.write \
( "Cannot handle income resulting from "
"innergemeinschaftlichem Erwerb\n %s\n"
% entry
)
if "r" in entry.cat :
sys.stderr.write \
( "Cannot handle income resulting from "
"reverse Charge\n %s\n"
% entry
)
if entry.minderung :
## Ausgabenminderung instead of Einnahme
self.ust_gut += vat
else :
self._add_umsatz (netto, vat, vat_p, entry)
# end def add
def _add_umsatz (self, netto, vat, vat_p, entry = None) :
self.ust += vat
self.umsatz += netto
self.umsatz_dict [vat_p] += netto
if entry and entry.g_or_n == "f" :
self.umsatz_frei += netto
elif vat_p != 1.0 :
self.ust_dict [vat_p] += vat
# end def _add_umsatz
def _add_vorsteuer (self, vat) :
self.vorsteuer += vat
# end def _add_vorsteuer
def finish (self) :
if not self._finished :
self._finished = True
netto = defaultdict (EUR)
p_vat = defaultdict (EUR)
vat_p = self.vat_privat
vat_pd = self.vat_privat_default
for entry in self.vorsteuer_entries :
k = entry.konto
if k in self.privat and entry.netto > 0 :
vat_p.setdefault (k, vat_pd)
factor = self.privat [k] / 100.0
corr = entry.netto * factor
netto [k] += corr
p_vat [k] += corr * (vat_p [k] - 1.00)
for k, n in pyk.iteritems (netto) :
if n :
self._add_umsatz (n, p_vat [k], vat_p [k])
# end def finish
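### Example for `finish` (illustrative figures): with privat = {"7300" : 40}
### and the default private VAT rate of 1.20, an expense entry with netto
### 1000 on account 7300 adds 400 back to the turnover and 80 to the VAT
### due, i.e. the private share is treated like a sale.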
def header_string (self) :
return " %s %8s%7s %8s %8s %-5s %-5s %s %s\n%s" % \
( "Tag"
, "MSt-Satz"
, "MWSt"
, "Netto"
, "Brutto"
, "Soll"
, "Haben"
, " "
, "Text"
, "=" * 80
)
# end def header_string
def print_entries (self, trailer = None) :
"""Print `self.entries' followed by trailer"""
self.print_entries_ (self.entries, self.header_string (), trailer)
# end def print_entries
def print_entries_by_group (self, trailer = None) :
"""Print `self.vorsteuer_entries', `self.vorsteuer_entries_igE', and
`self.umsatzsteuer_entries' followed by trailer.
"""
if ( self.vorsteuer_entries
or self.vorsteuer_entries_EUst
or self.vorsteuer_entries_igE
or self.vorsteuer_entries_revC
or self.umsatzsteuer_entries
) :
pyk.fprint (self.header_string ())
self.print_entries_ (self.vorsteuer_entries, trailer = "\n")
self.print_entries_ (self.vorsteuer_entries_EUst, trailer = "\n")
self.print_entries_ (self.vorsteuer_entries_igE, trailer = "\n")
self.print_entries_ (self.vorsteuer_entries_revC, trailer = "\n")
self.print_entries_ (self.umsatzsteuer_entries)
if ( self.vorsteuer_entries
or self.vorsteuer_entries_EUst
or self.vorsteuer_entries_igE
or self.vorsteuer_entries_revC
or self.umsatzsteuer_entries
) :
if trailer :
pyk.fprint (trailer)
# end def print_entries_by_group
def print_entries_ (self, entries, header = None, trailer = None) :
if entries :
if header :
pyk.fprint (header)
for e in entries :
pyk.fprint (e)
if trailer :
pyk.fprint (trailer)
# end def print_entries_
def print_summary_old (self) :
"""Print summary for Umsatzsteuervoranmeldung."""
vat_saldo = self._vat_saldo ()
meldung = ("Überschuss", "Vorauszahlung") [vat_saldo >= 0]
sign = (-1.0, +1.0) [vat_saldo >= 0]
pyk.fprint \
( "%-16s : %14s"
% ("Ausgaben brutto", self.ausgaben_b.as_string_s ())
)
pyk.fprint \
( "%-16s : %14s"
% ("Ausgaben netto", self.ausgaben_n.as_string_s ())
)
pyk.fprint ("\n%s\n" % ( "=" * 80, ))
pyk.fprint ("%-16s : %14s" % ( "Umsatz", self.umsatz.as_string_s ()))
pyk.fprint \
( "%-16s : %14s"
% ( "Steuerpflichtig"
, (self.umsatz - self.umsatz_dict [1.0]).as_string_s ()
)
)
self.print_ust_dict_ ( self.umsatz_dict, self.ust_dict)
pyk.fprint \
( "\n%-16s : %14s"
% ("Erwerbe igE", self.erwerb_igE.as_string_s ())
)
self.print_ust_dict_ ( self.erwerb_igE_dict, self.ust_igE_dict)
pyk.fprint \
( "\n%-16s : %14s"
% ("Reverse Charge", self.reverse_charge.as_string_s ())
)
self.print_ust_dict_ ( self.reverse_charge_dict, self.ust_revC_dict)
pyk.fprint \
( "\n%-16s : %14s"
% ( "USt Übertrag"
, (self.ust + self.ust_igE + self.ust_revCharge).as_string_s ()
)
)
pyk.fprint ("--------------- ./.. ---------------------------")
pyk.fprint \
( "%-16s : %14s"
% ( "USt Übertrag"
, (self.ust + self.ust_igE + self.ust_revCharge).as_string_s ()
)
)
pyk.fprint \
( "%-16s : %14s"
% ( "Vorsteuer", self.vorsteuer.as_string_s ())
)
pyk.fprint \
( "%-16s : %14s"
% ( "Umsatzsteuer aus Gutschrift", self.ust_gut.as_string_s ())
)
pyk.fprint \
( "%-16s : %14s"
% ("Einfuhrumsatzst.", self.vorsteuer_EUst.as_string_s ())
)
pyk.fprint \
( "%-16s : %14s"
% ( "Vorsteuer igE", self.vorsteuer_igE.as_string_s ())
)
pyk.fprint \
( "%-16s : %14s %14s"
% ( "Summe Vorsteuer"
, ( self.vorsteuer - self.ust_gut + self.vorsteuer_EUst
+ self.vorsteuer_igE + self.vorsteuer_revCh
).as_string_s ()
, ( self.vorsteuer - self.ust_gut + self.vorsteuer_EUst
+ self.vorsteuer_igE + self.vorsteuer_revCh
).as_string_s ()
)
)
if vat_saldo.target_currency.to_euro_factor == 1.0 :
### no rounding for Euro
pyk.fprint \
( "\n%-16s : %14s %s"
% ( meldung
, vat_saldo.as_string_s (), vat_saldo.target_currency.name
)
)
else :
### rounding is necessary
pyk.fprint \
( "\n%-16s : %14s%s00 %s (%s)"
% ( meldung
, vat_saldo.as_string_s (round = 1)
, vat_saldo.target_currency.decimal_sign
, vat_saldo.target_currency.name
, vat_saldo.as_string_s ()
)
)
# end def print_summary_old
def print_summary (self) :
"""Print summary for Umsatzsteuervoranmeldung."""
vat_saldo = self._vat_saldo ()
meldung = ("Überschuss", "Vorauszahlung") [vat_saldo >= 0]
sign = (-1.0, +1.0) [vat_saldo >= 0]
umsatz_vat = self.umsatz - self.umsatz_frei
pyk.fprint \
( "%-30s : %29s"
% ("Ausgaben brutto", self.ausgaben_b.as_string_s ())
)
pyk.fprint \
( "%-30s : %29s"
% ("Ausgaben netto", self.ausgaben_n.as_string_s ())
)
pyk.fprint ("\n%s\n" % ( "=" * 80, ))
pyk.fprint \
( "%-30s %3s : %29s"
% ("Nichtsteuerbar Ausland", "005", self.umsatz_frei.as_string_s())
)
pyk.fprint \
( "%-30s %3s : %29s"
% ("Lieferungen, sonstige", "000", umsatz_vat.as_string_s ())
)
pyk.fprint \
( "%-30s : %29s"
% ("Summe Bemessungsgrundlage", umsatz_vat.as_string_s ())
)
pyk.fprint ()
pyk.fprint \
( "%-30s : %29s"
% ("Gesamt steuerpflichtig", umsatz_vat.as_string_s ())
)
self.print_ust_dict_ (self.umsatz_dict, self.ust_dict, self._ust_cat)
pyk.fprint \
( "%-30s %3s : %29s"
% ( "Reverse Charge §19", "057"
, self.vorsteuer_revCh.as_string_s ()
)
)
pyk.fprint ()
pyk.fprint \
("%-30s : %29s" % ( "USt Übertrag", self.ust.as_string_s ()))
pyk.fprint ("-"*33, "./..", "-" * 27)
pyk.fprint \
("%-30s : %29s" % ( "USt Übertrag", self.ust.as_string_s ()))
pyk.fprint \
( "%-30s %3s : %14s"
% ("Erwerbe igE", "070", self.erwerb_igE.as_string_s ())
)
self.print_ust_dict_ \
(self.erwerb_igE_dict, self.ust_igE_dict, self._ige_cat)
pyk.fprint \
( "%-30s %3s : %29s"
% ("Vorsteuer", "060", self.vorsteuer.as_string_s ())
)
pyk.fprint \
( "%-30s %3s : %29s"
% ("Einfuhrumsatzsteuer", "061", self.vorsteuer_EUst.as_string_s ())
)
pyk.fprint \
( "%-30s %3s : %29s"
% ("Vorsteuer igE", "065", self.vorsteuer_igE.as_string_s ())
)
pyk.fprint \
( "%-30s %3s : %29s"
% ( "Reverse Charge §19", "066"
, self.vorsteuer_revCh.as_string_s ()
)
)
for (k, d), vst in sorted (pyk.iteritems (self.vorsteuer_kzs)) :
pyk.fprint ("%-30.30s %3s : %29s" % (d, k, vst.as_string_s ()))
pyk.fprint \
( "%-30s %3s : %29s"
% ( "Umsatzsteuer aus Gutschrift", "090"
, self.ust_gut.as_string_s ()
)
)
pyk.fprint \
( "%-30s : %29s"
% ( "Gesamtbetrag Vorsteuer"
, ( self.vorsteuer - self.ust_gut + self.vorsteuer_EUst
+ self.vorsteuer_igE + self.vorsteuer_revCh
).as_string_s ()
)
)
pyk.fprint ()
if vat_saldo.target_currency.to_euro_factor == 1.0 :
### no rounding for Euro
pyk.fprint \
( "%-30s %3s : %29s %s"
% ( meldung, "095"
, vat_saldo.as_string_s (), vat_saldo.target_currency.name
)
)
else :
### rounding is necessary
pyk.fprint \
( "%-30s %3s : %29s%s00 %s (%s)"
% ( meldung, "095"
, vat_saldo.as_string_s (round = 1)
, vat_saldo.target_currency.decimal_sign
, vat_saldo.target_currency.name
, vat_saldo.as_string_s ()
)
)
# end def print_summary
def print_summary_online (self) :
"""Print summary for Umsatzsteuervoranmeldung for online entry."""
vat_saldo = self._vat_saldo ()
meldung = ("Überschuss", "Vorauszahlung") [vat_saldo >= 0]
sign = (-1.0, +1.0) [vat_saldo >= 0]
umsatz_vat = self.umsatz - self.umsatz_frei
pyk.fprint \
("*** Lieferungen, sonstige Leistungen und Eigenverbrauch ***")
pyk.fprint ("=" * 67)
if umsatz_vat :
pyk.fprint \
( "%-50s %3s : %10s"
% ("Lieferungen, sonstige", "000", umsatz_vat.as_string_s ())
)
if self.umsatz_frei :
pyk.fprint \
( "%-50s %3s : %10s"
% ( "Nichtsteuerbar Ausland", "005"
, self.umsatz_frei.as_string_s ()
)
)
if umsatz_vat :
pyk.fprint ()
pyk.fprint ("Steuersätze")
pyk.fprint ("=" * 67)
self.print_ust_dict_online (self.umsatz_dict, self._ust_cat)
if self.vorsteuer_revCh :
pyk.fprint \
( "\n%-50s %3s : %10s"
% ( "Reverse Charge §19", "057"
, self.vorsteuer_revCh.as_string_s ()
)
)
pyk.fprint ("\n\n")
pyk.fprint ("*** Innergemeinschaftliche Erwerbe ***")
pyk.fprint ("=" * 67)
if self.erwerb_igE :
pyk.fprint \
( "%-50s %3s : %10s"
% ("Erwerbe igE", "070", self.erwerb_igE.as_string_s ())
)
pyk.fprint ()
pyk.fprint ("Steuersätze")
pyk.fprint ("=" * 47)
self.print_ust_dict_online (self.erwerb_igE_dict, self._ige_cat)
pyk.fprint ("\n\n")
pyk.fprint ("*** Vorsteuer ***")
pyk.fprint ("=" * 67)
pyk.fprint \
( "%-50s %3s : %10s"
% ("Vorsteuer", "060", self.vorsteuer.as_string_s ())
)
if self.vorsteuer_EUst:
pyk.fprint \
( "%-50s %3s : %10s"
% ( "Einfuhrumsatzsteuer", "061"
, self.vorsteuer_EUst.as_string_s ()
)
)
if self.vorsteuer_igE :
pyk.fprint \
( "%-50s %3s : %10s"
% ("Vorsteuer igE", "065", self.vorsteuer_igE.as_string_s ())
)
if self.vorsteuer_revCh :
pyk.fprint \
( "%-50s %3s : %10s"
% ( "Reverse Charge §19", "066"
, self.vorsteuer_revCh.as_string_s ()
)
)
for (k, d), vst in sorted (pyk.iteritems (self.vorsteuer_kzs)) :
pyk.fprint ("%-50.50s %3s : %10s" % (d, k, vst.as_string_s ()))
if self.ust_gut :
pyk.fprint \
( "%-50s %3s : %10s"
% ( "Umsatzsteuer auf Gutschrift", "090"
, self.ust_gut.as_string_s ()
)
)
pyk.fprint ("\n\n")
pyk.fprint ("*" * 67)
if vat_saldo.target_currency.to_euro_factor == 1.0 :
### no rounding for Euro
pyk.fprint \
( "%-50s %3s : %10s %s"
% ( meldung, "095"
, vat_saldo.as_string_s (), vat_saldo.target_currency.name
)
)
else :
### rounding is necessary
pyk.fprint \
( "%-50s %3s : %10s%s00 %s (%s)"
% ( meldung, "095"
, vat_saldo.as_string_s (round = 1)
, vat_saldo.target_currency.decimal_sign
, vat_saldo.target_currency.name
, vat_saldo.as_string_s ()
)
)
# end def print_summary_online
_ust_cat = {20 : "022", 10 : "029", 0 : ""}
_ige_cat = {20 : "072", 10 : "073", 0 : ""}
def print_ust_dict_ (self, umsatz_dict, ust_dict, cat = None) :
for vat_p in sorted (umsatz_dict, reverse = True) :
vp = int (((vat_p - 1) * 100) + 0.5)
pyk.fprint \
( "davon %2d%% %3s : %14s %14s"
% ( vp, cat [vp] if cat else ""
, umsatz_dict [vat_p].as_string_s ()
, ust_dict [vat_p].as_string_s ()
)
)
# end def print_ust_dict_
def print_ust_dict_online (self, umsatz_dict, cat) :
for vat_p in sorted (umsatz_dict, reverse = True) :
ust = umsatz_dict [vat_p]
if ust :
vp = int (((vat_p - 1) * 100) + 0.5)
hd = "davon %2d%%" % (vp, )
pyk.fprint \
("%-50s %3s : %10s" % (hd, cat [vp], ust.as_string_s ()))
# end def print_ust_dict_online
def _vat_saldo (self) :
### recompute `ust` instead of using `self.ust` to avoid 1-cent
### rounding errors
ust = sum \
( ((v*r - v) for (r, v) in pyk.iteritems (self.umsatz_dict))
, EUR (0)
)
return ust + self.ust_gut - self.vorsteuer - self.vorsteuer_EUst
# end def _vat_saldo
# end class U_Account
class T_Account (Account) :
"""Account total for a specific time interval (e.g., month, quarter, or year.)"""
### The following class variables can be overridden in a config file
### (e.g., ATAX.config)
eust_gkonto = "9997"
ige_gkonto = "9998"
rvc_gkonto = "9996"
ust_gkonto = "9999"
vorsteuer_gkonto = "9999"
t_konto_ignore_pat = Regexp (r"^[01239]\d\d\d\d?", re.X)
firma = "<<<Specify in config file, e.g., ATAX.config>>>"
calculation_method = \
"Das Ergebnis wurde gemäß Par.4/3 EStG nach der Nettomethode erstellt"
def __init__ (self, name = "", year = 0, konto_desc = None, vst_korrektur = 1.0) :
Account.__init__ (self, name, vst_korrektur)
self.year = year or \
day_to_time_tuple ("1.1").year - 1
self.konto_desc = konto_desc or {}
self.soll_saldo = defaultdict (EUR)
self.haben_saldo = defaultdict (EUR)
self.ausgaben = defaultdict (EUR)
self.einnahmen = defaultdict (EUR)
self.vorsteuer = defaultdict (EUR)
self.vorsteuer_EUst = defaultdict (EUR)
self.ust = defaultdict (EUR)
self.ust_igE = defaultdict (EUR)
self.ust_revCharge = defaultdict (EUR)
self.buchung_zahl = defaultdict (int)
self.ausgaben_total = EUR (0)
self.einnahmen_total = EUR (0)
self.vorsteuer_total = EUR (0)
self.ust_total = EUR (0)
self.k_entries = defaultdict (list)
self.kblatt = defaultdict (list)
# end def __init__
def add (self, entry) :
"""Add `entry' to account `self'."""
assert (isinstance (entry, Account_Entry))
self.buchung_zahl [entry.konto] += 1
self.buchung_zahl [entry.gegen_konto] += 1
self.kblatt [entry.konto].append (entry.kontenzeile ())
self.k_entries [entry.konto].append (entry)
if "-" in entry.dir :
self.soll_saldo [entry.konto] += entry.soll_betrag
self.haben_saldo [entry.gegen_konto] += entry.soll_betrag
if ( (not self.t_konto_ignore_pat.match (entry.konto))
or ("u" in entry.cat)
) :
betrag = self._effective_amount (entry, entry.soll_betrag)
self._add_ausgabe (entry, betrag, entry.vat)
else :
pass #print "%s not added to self.ausgaben" % entry.konto
elif "+" in entry.dir :
self.haben_saldo [entry.konto] += entry.haben_betrag
self.soll_saldo [entry.gegen_konto] += entry.haben_betrag
if ( (not self.t_konto_ignore_pat.match (entry.konto))
or ("u" in entry.cat)
) :
betrag = self._effective_amount (entry, entry.haben_betrag)
self._add_einnahme (entry, betrag, entry.vat)
else :
pass #print "%s not added to self.einnahmen" % entry.konto
# end def add
def _effective_amount (self, entry, amount) :
if ( self.t_konto_ignore_pat.match (entry.konto)
and "u" in entry.cat
) or entry.konto in self.ignore :
return 0
else :
return amount
# end def _effective_amount
def _add_ausgabe (self, entry, betrag, vat) :
if "u" in entry.cat :
if "i" in entry.cat :
vat_dict = self.ust_igE
elif "r" in entry.cat :
vat_dict = self.ust_revCharge
elif "z" in entry.cat :
vat_dict = self.vorsteuer_EUst
else :
vat_dict = self.vorsteuer
vat_dict [entry.month] += vat
self.vorsteuer_total += vat
self.ausgaben [entry.konto] += betrag
self.ausgaben_total += betrag
# end def _add_ausgabe
def _add_einnahme (self, entry, betrag, vat) :
if "u" in entry.cat :
if "i" in entry.cat :
ust_dict = self.ust_igE
elif "r" in entry.cat :
ust_dict = self.ust_revCharge
else :
if "z" in entry.cat :
pyk.fprint \
("*** Einnahme cannot include Einfuhrumsatzsteuer")
pyk.fprint (entry.line)
ust_dict = self.ust
ust_dict [entry.month] += vat
self.ust_total += vat
self.einnahmen [entry.konto] += betrag
self.einnahmen_total += betrag
# end def _add_einnahme
def finish (self) :
if not self._finished :
self._finished = True
self._do_gkonto \
( self.vorsteuer, self.vorsteuer_gkonto
, self.soll_saldo, "Vorsteuer"
, lambda s : (s, 0)
)
self._do_gkonto \
( self.vorsteuer_EUst, self.eust_gkonto
, self.soll_saldo, "Einfuhrumsatzsteuer"
, lambda s : (s, 0)
)
self._do_gkonto \
( self.ust, self.ust_gkonto
, self.haben_saldo, "Umsatzsteuer"
, lambda s : (0, s)
)
self._do_gkonto \
( self.ust_igE, self.ige_gkonto
, self.soll_saldo, "Vor- und Umsatzsteuer igE"
, lambda s : (s, s)
, self.haben_saldo
)
self._do_gkonto \
( self.ust_revCharge, self.rvc_gkonto
, self.soll_saldo, "Vor- und Umsatzsteuer rev. Ch."
, lambda s : (s, s)
, self.haben_saldo
)
for k in sorted (self.kblatt) :
if k in self.privat :
pa = self.privat [k]
factor = pa / 100.0
p_soll = self.soll_saldo [k] * factor
p_haben = self.haben_saldo [k] * factor
p_entry = self._fix_privat_anteil \
(k, pa, factor, p_soll, p_haben)
k_desc = self.konto_desc.get (k, "")
vat_pd = self.vat_privat_default
self.ausgaben [k] *= (1 - factor)
self.einnahmen [k] *= (1 - factor)
self.konto_desc [k] = "%s (abz. %s)" % \
(k_desc, p_entry.desc)
p_vat = p_soll * (self.vat_privat.get (k, vat_pd) - 1.00)
self._fix_ust_privat (k, k_desc, p_vat, p_entry.desc)
else :
for ke in self.k_entries [k] :
if ke.p_konto in self.privat :
pa = self.privat [ke.p_konto]
factor = pa / 100.0
p_soll = ke.soll_betrag * factor
p_haben = ke.haben_betrag * factor
self.ausgaben [k] -= p_soll
self.einnahmen [k] -= p_haben
self._fix_privat_anteil \
(k, pa, factor, p_soll, p_haben, ke.desc)
# end def finish
def _do_gkonto (self, ust, gkonto, saldo, txt, soll_haben, saldo2 = None) :
for m in sorted (ust) :
if ust.get (m, 0) != 0 :
self.buchung_zahl [gkonto] += 1
saldo [gkonto] += ust [m]
if saldo2 is not None :
saldo2 [gkonto] += ust [m]
s, h = soll_haben (ust [m])
self.kblatt [gkonto].append \
(Ust_Gegenbuchung (m, gkonto, s, h, txt).kontenzeile ())
# end def _do_gkonto
def _fix_privat_anteil (self, k, pa, factor, p_soll, p_haben, desc = "") :
p_desc = ("%2d%% Privatanteil %s" % (int (pa + 0.5), desc)).strip ()
p_entry = Privatanteil ("9200", -p_soll, -p_haben, p_desc)
self.soll_saldo [k] -= p_soll
self.haben_saldo [k] -= p_haben
self.kblatt [k].append (p_entry.kontenzeile ())
return p_entry
# end def _fix_privat_anteil
def _fix_ust_privat (self, k, k_desc, p_vat, p_desc) :
p_desc = "%s [%s (%s)]" % (p_desc, k, k_desc)
gkonto = self.ust_gkonto
self.buchung_zahl [gkonto] += 1
self.haben_saldo [gkonto] += p_vat
self.kblatt [gkonto].append \
(Privatanteil ("9200", 0, p_vat, p_desc, gkonto).kontenzeile ())
# end def _fix_ust_privat
def print_konten (self) :
self.finish ()
tc = EUR.target_currency.name
for k in sorted (self.kblatt) :
head = "%s %s" % (self.year, self.konto_desc.get (k, "")) [:64]
tail = "Konto-Nr. %5s" % k
belly = " " * (79 - len (head) - len (tail))
pyk.fprint ("\n\n%s%s%s" % (head, belly, tail))
self.print_sep_line ()
pyk.fprint \
( "%-4s %-6s %-35s%-3s %12s %12s"
% ("Tag", "Gegkto", "Text", "Ust", "Soll", "Haben")
)
self.print_sep_line ()
pyk.fprint ("\n".join (self.kblatt [k]))
pyk.fprint \
( "\n%12s %-38s %12s %12s"
% ( "", "Kontostand neu"
, self.soll_saldo [k].as_string_s ()
, self.haben_saldo [k].as_string_s ()
)
)
pyk.fprint \
( "\n%12s %-48s %12s %s"
% ( ""
, "Saldo neu"
, (self.soll_saldo [k] - self.haben_saldo [k]).as_string_s ()
, tc
)
)
self.print_sep_line ()
# end def print_konten
def print_konto_summary (self) :
self.finish ()
tc = EUR.target_currency.name
pyk.fprint ("Zusammenfassung Konten %-52s %s" % (self.year, tc))
self.print_sep_line ()
pyk.fprint \
( "%-5s %12s %12s %12s %s"
% ("Konto", "Soll", "Haben", "Saldo", "Text")
)
self.print_sep_line ()
for k in sorted (self.kblatt) :
pyk.fprint \
( "%-5s%13s%13s %13s %s"
% ( k
, self.soll_saldo [k].as_string_s ()
, self.haben_saldo [k].as_string_s ()
, (self.soll_saldo [k] - self.haben_saldo [k]).as_string_s ()
, self.konto_desc.get (k, "") [:33]
)
)
# end def print_konto_summary
def print_sep_line (self) :
pyk.fprint ("%s" % ("=" * 79, ))
# end def print_sep_line
def print_ein_aus_rechnung (self) :
self.finish ()
tc = EUR.target_currency.name
pyk.fprint (self.firma)
pyk.fprint \
( underlined
("Einnahmen/Ausgabenrechnung %s (%s)" % (self.year, tc))
)
pyk.fprint ("\n")
pyk.fprint (underlined ("Betriebseinnahmen"))
pyk.fprint ("\n")
format = "%-40s %15s %15s"
format1 = "%-40s -%14s %15s"
e_total = EUR (0)
for k in sorted (self.einnahmen) :
einnahmen = self.einnahmen [k] - self.ausgaben [k]
if k [0] not in ("4", "8") : continue
if einnahmen == 0 : continue
e_total = e_total + einnahmen
pyk.fprint \
( format
% ( self.konto_desc.get (k, "") [:40], ""
, einnahmen.as_string_s ()
)
)
pyk.fprint (format % ("", "", "_" * 15))
pyk.fprint (format % ("", "", e_total.as_string_s ()), tc)
pyk.fprint ("\n")
pyk.fprint (underlined ("Betriebsausgaben"))
pyk.fprint ("\n")
a_total = EUR (0)
for k in sorted (self.ausgaben) :
ausgaben = self.ausgaben [k] - self.einnahmen [k]
if k [0] not in ("5", "6", "7") : continue
if ausgaben == 0 : continue
a_total = a_total + ausgaben
pyk.fprint \
( format
% ( self.konto_desc.get (k, "") [:40]
, ausgaben.as_string_s (), ""
)
)
if self.vst_korrektur != 1 :
p_anteil = a_total * (1 - self.vst_korrektur)
pyk.fprint (format % ( "", "_" * 15, ""))
pyk.fprint (format % ( "", a_total.as_string_s (), ""))
pyk.fprint \
( format1
% ( "Privatanteil %5.2f%%" % ((1 - self.vst_korrektur) * 100, )
, p_anteil.as_string_s (), ""
)
)
if self.gewerbe_anteil :
self.g_anteil = g_anteil = a_total * self.gewerbe_anteil
pyk.fprint \
( format1
% ( "Gewerbeanteil %5.2f%%" % (self.gewerbe_anteil * 100, )
, g_anteil.as_string_s (), ""
)
)
else :
g_anteil = 0
a_total = a_total - p_anteil - g_anteil
pyk.fprint (format % ( "", "_" * 15, "_" * 15))
pyk.fprint \
( format1
% ( ""
, a_total.as_string_s ()
, (e_total - a_total).as_string_s ()
)
, tc
)
pyk.fprint ("\n%s." % (self.calculation_method, ))
# end def print_ein_aus_rechnung
g_anteil = EUR (0)
# end class T_Account
class H_Account_Entry (Account_Entry) :
"""Entry of H_Account"""
Ancestor = __Ancestor = Account_Entry
def __init__ (self, * args, ** kw) :
self.__Ancestor.__init__ (self, * args, ** kw)
if "-" in self.dir and self.vat_p != 1 :
self.netto += self.vat_vstk
self.soll_betrag += self.vat_vstk
# end def __init__
def kontenzeile (self) :
try :
return \
( "\\Einzelposten{%s\\hfill %s}{%s%s}"
% (self.desc, self.date, self.dir, self.netto.as_string ())
)
except Exception as exc :
pyk.fprint (exc)
pyk.fprint (sorted (self.__dict__.items ()))
raise
# end def kontenzeile
# end class H_Account_Entry
class H_Account (T_Account) :
"""House account for a specific time interval."""
Ancestor = __Ancestor = T_Account
Entry = H_Account_Entry
ignore = () ### DON'T IGNORE ANYTHING HERE
t_konto_ignore_pat = Regexp (r"^DON'T MATCH ANYTHING HERE$", re.X)
def _effective_amount (self, entry, amount) :
return amount
# end def _effective_amount
# end class H_Account
class Konto_Desc (dict) :
"""Model kontenplan of austrian accounting system"""
def __init__ (self, file) :
dict.__init__ (self)
k_d = self
d_k = self.reverse = {}
if isinstance (file, str) :
file = open (file, "rb")
pat = Regexp (r"^[0-9]")
s_pat = Regexp (r"\s*:\s*")
for l in file :
line = pyk.decoded (l, "utf-8", "iso-8859-15")
if not pat.match (line) :
continue
(konto, desc) = s_pat.split (line)
konto = konto.replace ("_", "0").strip ()
desc = desc.replace ('"', "" ).strip ()
k_d [konto] = desc
d_k [desc] = konto
# end def __init__
# end class Konto_Desc
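### Illustrative kontenplan line: an input line such as
### 7600 : Büroaufwand
### yields konto_desc ["7600"] == "Büroaufwand" and the reverse mapping
### konto_desc.reverse ["Büroaufwand"] == "7600"; underscores in the account
### number are replaced by zeros and lines not starting with a digit are
### skipped.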
class _ATAX_Command_ (TFL.Meta.Object) :
"""Main class for accounting scripts"""
_real_name = "Command"
default_categories = "u"
max_args = -1
min_args = 0
def __init__ (self) :
cmd = self._command ()
self.load_config (cmd)
if cmd.all :
categories = "."
else :
categories = "[" + "".join (cmd.categories) + "]"
categories = Regexp (categories)
source_currency = cmd.source_currency
vst_korrektur = cmd.vst_korrektur
account = self._create_account \
(cmd, categories, source_currency, vst_korrektur)
self._add_files (cmd, account, categories, source_currency)
self._output (cmd, account, categories, source_currency)
# end def __init__
def _command (self) :
cmd = TFL.CAO.Cmd \
( args = self._arg_spec ()
, opts = self._opt_spec ()
, min_args = self.min_args
, max_args = self.max_args
, description = self.__doc__ or ""
)
return cmd ()
# end def _command
@classmethod
def load_config (cls, cmd) :
globs = globals ()
cf_dict = dict (ATAX = ATAX)
for cf in cmd.Config :
TFL.load_config_file (cf, globs, cf_dict)
# end def load_config
def _add_files (self, cmd, account, categories, source_currency) :
if cmd.stdin :
assert cmd.argn == 0
account.add_lines (sys.stdin, categories, source_currency)
else :
for f_or_p in cmd.argv :
for file_name in glob.glob (f_or_p) :
account.add_file (file_name, categories, source_currency)
# end def _add_files
def _arg_spec (self) :
return ("file:P", )
# end def _arg_spec
def _opt_spec (self) :
return \
( "-all:B"
, "-categories:S,=%s" % self.default_categories
, "-Config:P,?Config file(s)"
, "-stdin:B?Read data from standard input"
, "-vst_korrektur:F=1.0"
, TFL.CAO.Arg.EUC_Source ()
, TFL.CAO.Arg.EUC_Target ()
, TFL.CAO.Opt.Output_Encoding (default = "utf-8")
)
# end def _opt_spec
Command = _ATAX_Command_ # end class
if __name__ != "__main__" :
ATAX._Export ("*")
### __END__ ATAX.accounting | ATAX | /ATAX-1.1.3.tar.gz/ATAX-1.1.3/accounting.py | accounting.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from _ATAX.accounting import *
class Command (Command) :
default_categories = "e"
min_args = 2
def _create_account (self, cmd, categories, source_currency, vst_korrektur) :
year = cmd.argv.pop (0)
konto_desc = Konto_Desc (cmd.argv.pop (0))
return T_Account \
( year = year
, konto_desc = konto_desc
, vst_korrektur = vst_korrektur
)
# end def _create_account
def _output (self, cmd, account, categories, source_currency) :
if cmd.plain :
ATAX._.accounting.underlined = identity
if not cmd.summary :
account.print_konto_summary ()
account.print_konten ()
print ("\f")
account.print_ein_aus_rechnung ()
if cmd.gewerbeanteil and account.g_anteil != 0 :
with open (cmd.gewerbeanteil, "wb") as gfile :
gfile.write \
( pyk.encoded
( """$source_currency = "%s";\n"""
% EUC.target_currency.name
)
)
gfile.write \
( pyk.encoded
( " 31.12. & & & %8.2f & b & 7001 & 2000 "
"& - & e & & Büroaufwand %s\n"
% (account.g_anteil, cmd.year)
)
)
# end def _output
def _arg_spec (self) :
return ("year:S", "kontenplan:P", "account_file:S")
# end def _arg_spec
def _opt_spec (self) :
return self.__super._opt_spec () + \
( "-gewerbeanteil:P?File to write gewerbe-anteil into"
, "-plain:B?Don't underline"
, "-summary:B"
)
# end def _opt_spec
# end class Command
if __name__ == "__main__":
Command ()
### __END__ ATAX.jahresabschluss | ATAX | /ATAX-1.1.3.tar.gz/ATAX-1.1.3/jahresabschluss.py | jahresabschluss.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from _ATAX import ATAX
from _CAL import CAL
from _TFL import TFL
from _ATAX.accounting import ignor_pat
from _TFL.predicate import *
from _TFL.pyk import pyk
from _TFL.Regexp import *
from _TFL._Meta.Once_Property import Once_Property
import _CAL.Date_Time
import _TFL._Meta.Object
import _TFL.CAO
import _TFL.Environment
@pyk.adapt__bool__
@pyk.adapt__str__
class FB_Entry (TFL.Meta.Object) :
"""Model one entry of a Fahrtenbuch"""
str_format = r" %-11s & %8d & %6d & %s"
tex_format = r" %-10s & %6d & %6d & %4d & %4d & \sf %s"
atax_format = \
" %s & & & %6.2f & b & 7500 & 2100 & - & e & & KM Geld (%5.1f km)"
def __init__ (self, date, km_start, km_finis, priv, desc, ** kw) :
self.date = date
self.km_start = float (km_start)
self.km_finis = float (km_finis)
self.priv = float (priv or 0)
self.desc, _, self.comment = (x.strip () for x in split_hst (desc, "%"))
self.__dict__.update (kw)
# end def __init__
@Once_Property
def delta (self) :
return self.km_finis - self.km_start
# end def delta
@Once_Property
def km_business (self) :
return self.delta * (1.0 - self.priv / 100.)
# end def km_business
@Once_Property
def km_private (self) :
return self.delta * (self.priv / 100.)
# end def km_private
def tex (self) :
date = self.date.formatted ("%d.%m.%Y")
bus_km = self.km_business
priv_km = self.km_private
desc = self.desc ### TeX-quote `desc`
return self.tex_format % \
(date, self.km_start, self.km_finis, bus_km, priv_km, desc)
# end def tex
def atax (self) :
date = self.date.formatted ("%d.%m")
km = self.km_business
f = 0.42 ### 0.42 Euro/km
if self.date < CAL.Date_Time (2008, 7, 1) :
f = 0.38
return self.atax_format % (date, f * km, km)
# end def atax
def __bool__ (self) :
return bool (self.delta)
# end def __bool__
def __str__ (self) :
date = self.date.formatted ("%d.%m.%Y")
return self.str_format % (date, self.km_finis, self.priv, self.desc)
# end def __str__
# end class FB_Entry
class Fahrtenbuch (TFL.Meta.Object) :
"""Model a Fahrtenbuch"""
lines_per_page = 50
Entry = FB_Entry
def __init__ (self, user, entries = []) :
self.user = user
self.entries = []
for e in entries :
self.add (e)
# end def __init__
def add (self, entry) :
self.entries.append (entry)
# end def add
@classmethod
def from_file (cls, user, file_name) :
result = cls (user)
add = result.add
last = None
with open (file_name, "rb") as file :
for l in result._read_lines (file) :
line = pyk.decoded (l, "utf-8", "iso-8859-1")
try :
d, km, priv, desc = \
[f.strip () for f in line.split ("&", 4)]
except ValueError :
pyk.fprint ("Split error `%s`" % line)
else :
last = result._new_entry (last, d, km, priv, desc)
add (last)
return result
# end def from_file
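### Illustrative fahrtenbuch line (format inferred from `from_file`):
### 1.3.2015 & 45100 & 20 & Kundenbesuch Wien % short note
### is split on `&` into date, odometer reading, private share in percent,
### and description; the text after `%` is kept as a comment and the km
### driven are the difference to the previous entry's odometer reading.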
@property
def km_business (self) :
return sum (e.km_business for e in self.entries)
# end def km_business
def km_geld (self) :
result = ["""$source_currency = "EUR";"""]
for e in self.entries :
if e.km_business :
result.append (e.atax ())
return "\n".join (result)
# end def km_geld
@property
def km_private (self) :
return sum (e.km_private for e in self.entries)
# end def km_private
@property
def private_percent (self) :
kmb = self.km_business
kmp = self.km_private
result = 0
if kmp :
result = 100.0 / ((kmb + kmp) / kmp)
return result
# end def private_percent
def tex (self) :
result = []
entries = self.entries
if entries :
head = entries [0]
tail = entries [-1]
h_date = head.date.formatted ("%m/%Y")
t_date = tail.date.formatted ("%m/%Y")
dates = h_date
if h_date != t_date :
dates = "%s -- %s" % (h_date, t_date)
add = result.append
add ( ( r"\def\fahrtenbuchpageheader"
r"{\begin{Larger}\bf "
r"\strut\triline{Fahrtenbuch}{%s}{%s}"
r"\end{Larger}"
r"\par\vspace{0.0cm}"
r"}"
)
% (self.user, dates)
)
add (r"\begin{fahrtenbuch}")
i = 1
lpp = self.lines_per_page
for e in entries :
if e :
sep = [r"\\ ", r"\CR"] [(i % 5) == 0]
add ("%s %s" % (e.tex (), sep.strip ()))
i += 1
if i > lpp :
add (r"\end{fahrtenbuch}")
add (r"\\ \hbox{\strut}\hfill ./..%")
add (r"\begin{fahrtenbuch}")
i = 1
add (r"\hline\hline")
kmb = self.km_business
kmp = self.km_private
priv = self.private_percent
add ( FB_Entry.tex_format
% ( "Total", head.km_start, tail.km_finis, kmb, kmp
, r"\hfill Privatanteil = \percent{%5.2f} \CR" % priv
)
)
add (r"\end{fahrtenbuch}")
return "\n".join (result)
# end def tex
def _new_entry (self, last, d, km_finis, priv, desc) :
km_start = km_finis
if last is not None :
km_start = last.km_finis
return self.Entry \
(CAL.Date_Time.from_string (d), km_start, km_finis, priv, desc)
# end def _new_entry
def _read_lines (self, file) :
for l in file :
line = pyk.decoded (l.strip (), "utf-8", "iso-8859-1")
if line and not ignor_pat.match (line) :
yield line
# end def _read_lines
def __str__ (self) :
return "\n".join (pyk.text_type (e) for e in self.entries)
# end def __str__
# end class Fahrtenbuch
def _main (cmd) :
ATAX.Command.load_config (cmd)
fb = Fahrtenbuch.from_file (cmd.user, cmd.fahrtenbuch)
if not cmd.km_geld :
pyk.fprint (fb.tex ())
else :
pyk.fprint (fb.km_geld ())
# end def _main
_Command = TFL.CAO.Cmd \
( handler = _main
, args =
("fahrtenbuch:P?File defining fahrtenbuch", )
, min_args = 1
, max_args = 1
, opts =
( "Config:P,?Config file(s)"
, "user:S=%s" % TFL.Environment.username.capitalize ()
, "km_geld:B?Print the factoring for the KM Geld"
, TFL.CAO.Opt.Output_Encoding (default = "iso-8859-15")
)
)
if __name__ != "__main__" :
ATAX._Export ("*")
else :
_Command ()
### __END__ ATAX.fahrtenbuch | ATAX | /ATAX-1.1.3.tar.gz/ATAX-1.1.3/fahrtenbuch.py | fahrtenbuch.py |
Sphinx-PyPI-upload
==================
This package contains a `setuptools`_ command for uploading `Sphinx`_
documentation to the `Python Package Index`_ (PyPI) at the dedicated URL
packages.python.org.
.. _setuptools: http://pypi.python.org/pypi/setuptools
.. _Sphinx: http://sphinx.pocoo.org/
.. _`Python Package Index`: http://pypi.python.org/
The ``upload_sphinx`` command
------------------------------
``upload_sphinx`` creates the necessary zip file from an arbitrary
documentation directory and posts it to the correct URL.
It is also loosely based on Sphinx's own setuptools command build_sphinx_,
which makes it easy to build documentation from the command line.
The ``upload_sphinx`` command has the following options:
- ``--repository (-r)``:
url of repository [default: http://pypi.python.org/pypi]
- ``--show-response``:
display full response text from server
- ``--upload-dir``:
directory to upload
.. _build_sphinx: http://bitbucket.org/birkenfeld/sphinx/src/tip/sphinx/setup_command.py
Example
--------
Assuming there is an ``Example`` package with Sphinx documentation to be
uploaded to http://packages.python.org, with the following structure::
Example/
|-- example.py
|-- setup.cfg
|-- setup.py
|-- docs
| |-- build
| | `-- html
| |-- conf.py
| |-- index.txt
| `-- tips_tricks.txt
As with any other setuptools based command, you can define useful defaults in
the setup.cfg of your Python package. The following snippet shows how to set
the option defaults of the ``build_sphinx`` and ``upload_sphinx`` setup.py
commands::
[build_sphinx]
source-dir = docs/
build-dir = docs/build
all_files = 1
[upload_sphinx]
upload-dir = docs/build/html
To build and upload the Sphinx documentation, you can now run::
$ python setup.py build_sphinx
$ python setup.py upload_sphinx
Alternatively, you can of course just pass the appropriate options directly
to the commands::
$ python setup.py build_sphinx --source-dir=docs/ --build-dir=docs/build --all-files
$ python setup.py upload_sphinx --upload-dir=docs/build/html
|Build Status| |PyPI Version| |Python Versions| |License|
ATEMStreamingXML
================
Utility to update the ATEM Software Control ``Streaming.xml`` file to support new streaming providers (for use with ATEM Mini Pro and ATEM Mini Pro ISO).
Installation
------------
Install with pip::
pip install ATEMStreamingXML
Command Line Usage
------------------
**Usage**::
ATEMStreamingXML [-h] -S SERVICE [-N SERVER_NAME] [-U SERVER_URL]
[--default-profiles] [-P PROFILE_NAME] [-C {1080p60,1080p30}]
[--br BITRATE] [--abr AUDIO_BITRATE] [--ki KEYFRAME_INTERVAL]
[--remove] [--remove-server] [--remove-profile] [--remove-config] [-n]
**Arguments**
-h, --help show this help message and exit
-S SERVICE, --service SERVICE Streaming service name to update/remove
-N SERVER_NAME, --server-name SERVER_NAME Streaming server name to update/remove
-U SERVER_URL, --server-url SERVER_URL Streaming server RTMP URL
--default-profiles Create or update default profiles for a streaming service
-P PROFILE_NAME, --profile-name PROFILE_NAME Streaming profile name to update/remove
-C RESOLUTION, --profile-config RESOLUTION Streaming profile config resolution and frame rate to update/remove (``1080p60`` or ``1080p30``)
--br BITRATE, --bitrate BITRATE Streaming profile config bitrate
--abr AUDIO_BITRATE, --audio-bitrate AUDIO_BITRATE Streaming profile config audio bitrate
--ki KEYFRAME_INTERVAL, --keyframe-interval KEYFRAME_INTERVAL Streaming profile config keyframe interval
--remove, --remove-service Remove streaming service
--remove-server Remove streaming server from a service
--remove-profile Remove streaming profile from a service
--remove-config Remove streaming profile config from a profile
-n, --dry-run Show changes that would be made
**Environment Variables**
``ATEM_STREAMING_XML``
Specify an alternate path to the ``Streaming.xml`` file (used for unit tests)
**Examples**
The `scripts <https://github.com/cchurch/ATEMStreamingXML/tree/master/scripts>`_ directory contains examples of command usage for alternate streaming services.
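A hypothetical invocation (the service name, server name, and URL below are
made up for illustration) that adds an RTMP server together with the built-in
default profiles might look like::
    ATEMStreamingXML -S "Example Service" -N "Primary" -U "rtmp://rtmp.example.com/live" --default-profiles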
Caveats
-------
* Does not preserve XML comments (limitation of ``xml.etree.ElementTree``).
* Does not allow reordering of streaming services, servers or profiles.
* Does not save backup copy of original ``Streaming.xml``.
* Requires running with ``sudo`` and will prompt accordingly if access is denied to modify the ``Streaming.xml``.
.. |Build Status| image:: https://img.shields.io/github/workflow/status/cchurch/ATEMStreamingXML/test
:target: https://github.com/cchurch/ATEMStreamingXML/actions?query=workflow%3Atest
.. |PyPI Version| image:: https://img.shields.io/pypi/v/ATEMStreamingXML.svg
:target: https://pypi.python.org/pypi/ATEMStreamingXML
.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/ATEMStreamingXML.svg
:target: https://pypi.python.org/pypi/ATEMStreamingXML
.. |License| image:: https://img.shields.io/pypi/l/ATEMStreamingXML.svg
:target: https://pypi.python.org/pypi/ATEMStreamingXML
| ATEMStreamingXML | /ATEMStreamingXML-0.1.4.tar.gz/ATEMStreamingXML-0.1.4/README.rst | README.rst |
# Python
from __future__ import print_function, with_statement, unicode_literals
import argparse
import difflib
import errno
import os
import sys
import xml.etree.ElementTree as ET
__version__ = '0.1.4'
def get_streaming_xml_path():
if sys.platform == 'darwin':
default_path = os.path.join('/Library', 'Application Support', 'Blackmagic Design', 'Switchers', 'Streaming.xml')
elif sys.platform == 'win32': # pragma: no cover
program_files_path = os.environ.get('ProgramFiles(x86)', os.environ.get('ProgramFiles'))
default_path = os.path.join(program_files_path, 'Blackmagic Design', 'Blackmagic ATEM Switchers', 'ATEM Software Control', 'Streaming.xml')
else: # pragma: no cover
default_path = 'Streaming.xml'
if os.environ.get('ATEM_STREAMING_XML', None) is None:
raise RuntimeError('unsupported platform: {}'.format(sys.platform))
return os.environ.get('ATEM_STREAMING_XML', default_path)
def find_sub_element_by_name(parent_element, child_tag, name_text, name_tag='name'):
for sub_element in parent_element.findall(child_tag):
name_element = sub_element.find(name_tag)
if name_element is not None and name_element.text == name_text:
return sub_element
def create_or_update_sub_element(parent_element, child_tag, text=None):
sub_element = parent_element.find(child_tag)
if sub_element is None:
sub_element = ET.SubElement(parent_element, child_tag)
if text is not None:
text = '{}'.format(text)
if sub_element.text != text:
sub_element.text = text
return sub_element
def create_sub_element(parent_element, child_tag, name_text=None, name_tag='name'):
sub_element = ET.SubElement(parent_element, child_tag)
if name_text is not None: # pragma: no cover
create_or_update_sub_element(sub_element, name_tag, name_text)
return sub_element
def get_or_create_config_element(profile_element, resolution, fps):
for config_element in profile_element.findall('config'):
if config_element.get('resolution') == resolution and config_element.get('fps') == fps:
return config_element
elif config_element.get('resultion') == resolution and config_element.get('fps') == fps: # pragma: no cover
config_element.attrib.pop('resultion')
config_element.set('resolution', resolution)
return config_element
config_element = ET.SubElement(profile_element, 'config')
config_element.set('resolution', resolution)
config_element.set('fps', fps)
return config_element
def update_profile_element(profile_element, **kwargs):
profile_name = kwargs.get('profile_name')
create_or_update_sub_element(profile_element, 'name', profile_name)
profile_config = kwargs.get('profile_config')
if not profile_config:
return
elif profile_config in ('1080p60', '1080p30'):
resolution, fps = profile_config[:5], profile_config[-2:]
else: # pragma: no cover
raise ValueError('invalid profile config: {}'.format(profile_config))
config_element = get_or_create_config_element(profile_element, resolution, fps)
if kwargs.get('remove_config', False):
profile_element.remove(config_element)
return profile_element
bitrate = kwargs.get('bitrate')
if bitrate is not None:
if bitrate > 0:
create_or_update_sub_element(config_element, 'bitrate', bitrate)
else:
bitrate_element = config_element.find('bitrate')
if bitrate_element is not None:
config_element.remove(bitrate_element)
audio_bitrate = kwargs.get('audio_bitrate')
if audio_bitrate is not None:
if audio_bitrate > 0:
create_or_update_sub_element(config_element, 'audio-bitrate', audio_bitrate)
else:
audio_bitrate_element = config_element.find('audio-bitrate')
if audio_bitrate_element is not None:
config_element.remove(audio_bitrate_element)
keyframe_interval = kwargs.get('keyframe_interval')
if keyframe_interval is not None:
if keyframe_interval > 0:
create_or_update_sub_element(config_element, 'keyframe-interval', keyframe_interval)
else:
keyframe_interval_element = config_element.find('keyframe-interval')
if keyframe_interval_element is not None:
config_element.remove(keyframe_interval_element)
return profile_element
def update_server_element(server_element, **kwargs):
create_or_update_sub_element(server_element, 'name', kwargs.get('server_name'))
create_or_update_sub_element(server_element, 'url', kwargs.get('server_url'))
return server_element
def update_service_element(service_element, **kwargs):
servers_element = create_or_update_sub_element(service_element, 'servers')
server_name = kwargs.get('server_name')
if server_name:
server_element = find_sub_element_by_name(servers_element, 'server', server_name)
if kwargs.get('remove_server', False):
if server_element is not None:
servers_element.remove(server_element)
else:
if server_element is None:
server_element = create_sub_element(servers_element, 'server', server_name)
update_server_element(server_element, **kwargs)
profiles_element = create_or_update_sub_element(service_element, 'profiles')
if kwargs.get('default_profiles', False):
profiles_list = [
dict(profile_name='Streaming High', profile_config='1080p60', bitrate=9000000),
dict(profile_name='Streaming High', profile_config='1080p30', bitrate=6000000),
dict(profile_name='Streaming Medium', profile_config='1080p60', bitrate=7000000),
dict(profile_name='Streaming Medium', profile_config='1080p30', bitrate=4500000),
dict(profile_name='Streaming Low', profile_config='1080p60', bitrate=4500000),
dict(profile_name='Streaming Low', profile_config='1080p30', bitrate=3000000),
]
for profile_kwargs in profiles_list:
profile_kwargs.setdefault('audio_bitrate', 128000)
profile_kwargs.setdefault('keyframe_interval', 2)
elif kwargs.get('profile_name'):
profiles_list = [kwargs]
else:
profiles_list = []
for profile_kwargs in profiles_list:
profile_name = profile_kwargs.get('profile_name')
profile_element = find_sub_element_by_name(profiles_element, 'profile', profile_name)
if kwargs.get('remove_profile', False):
if profile_element is not None:
profiles_element.remove(profile_element)
else:
if profile_element is None:
profile_element = create_sub_element(profiles_element, 'profile', profile_name)
update_profile_element(profile_element, **profile_kwargs)
return service_element
def update_streaming_element(streaming_element, **kwargs):
assert streaming_element.tag == 'streaming'
service_name = kwargs.get('service_name')
service_element = find_sub_element_by_name(streaming_element, 'service', service_name)
if kwargs.get('remove_service', False):
if service_element is not None:
streaming_element.remove(service_element)
else:
if service_element is None:
service_element = create_sub_element(streaming_element, 'service', service_name)
update_service_element(service_element, **kwargs)
def update_xml_indentation(element, text='\n\t', tail=''):
if len(element):
assert not (element.text or '').strip() # Make sure there's no extra text except for whitespace.
element.text = text
element.tail = (element.tail or '').rstrip() + tail
for n, child_element in enumerate(element):
child_text = text + '\t'
if n == (len(element) - 1):
child_tail = text[:-1]
elif tail:
child_tail = text
else:
child_tail = '\n' + text
update_xml_indentation(child_element, child_text, child_tail)
else:
element.tail = (element.tail or '').rstrip() + tail
def element_tostring(element, encoding=None, method=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ET.ElementTree(element).write(file, encoding, method=method)
if sys.version_info[0] == 2: # pragma: no cover
data = [d.encode('UTF-8') if isinstance(d, unicode) else d for d in data[:]] # noqa
return str('').join(data)
else:
return b''.join(data)
def update_streaming_xml(**kwargs):
parser = kwargs.get('parser', None)
dry_run = kwargs.get('dry_run', False)
xml_parser = ET.XMLParser(encoding='UTF-8')
tree = ET.parse(get_streaming_xml_path(), parser=xml_parser)
streaming_element = tree.getroot()
original_xml = element_tostring(streaming_element, encoding='UTF-8').decode('UTF-8')
original_lines = original_xml.splitlines(True)
update_streaming_element(streaming_element, **kwargs)
update_xml_indentation(streaming_element)
updated_xml = element_tostring(streaming_element, encoding='UTF-8').decode('UTF-8')
updated_lines = updated_xml.splitlines(True)
for line in difflib.context_diff(original_lines, updated_lines, fromfile='Streaming-old.xml', tofile='Streaming-new.xml'):
print(line.rstrip('\n'))
if not dry_run:
try:
tree.write(get_streaming_xml_path(), encoding='UTF-8', xml_declaration=True)
except (IOError, OSError) as e: # pragma: no cover
if e.errno == errno.EACCES:
if sys.platform == 'win32':
parser.exit(e.errno, '{}\nTry running the command as an Administrator.'.format(e))
else:
parser.exit(e.errno, '{}\nTry running the command with sudo.'.format(e))
else:
parser.exit(e.errno, '{}'.format(e))
def main(*args):
parser = argparse.ArgumentParser(
description='Modify ATEM Mini Pro Streaming.xml.',
)
parser.add_argument(
'-V',
'--version',
action='version',
version='%(prog)s {}'.format(__version__),
)
parser.add_argument(
'-S',
'--service',
dest='service_name',
metavar='SERVICE',
required=True,
help='Streaming service name to update/remove',
)
parser.add_argument(
'-N',
'--server-name',
dest='server_name',
metavar='SERVER_NAME',
help='Streaming server name to update/remove',
)
parser.add_argument(
'-U',
'--server-url',
dest='server_url',
metavar='SERVER_URL',
help='Streaming server RTMP URL',
)
parser.add_argument(
'--default-profiles',
dest='default_profiles',
action='store_true',
default=False,
help='Create or update default profiles for a streaming service',
)
parser.add_argument(
'-P',
'--profile-name',
dest='profile_name',
metavar='PROFILE_NAME',
help='Streaming profile name to update/remove',
)
parser.add_argument(
'-C',
'--profile-config',
dest='profile_config',
choices=['1080p60', '1080p30'],
help='Streaming profile config resolution and frame rate to update/remove',
)
parser.add_argument(
'--br',
'--bitrate',
dest='bitrate',
type=int,
help='Streaming profile config bitrate',
)
parser.add_argument(
'--abr',
'--audio-bitrate',
dest='audio_bitrate',
type=int,
help='Streaming profile config audio bitrate',
)
parser.add_argument(
'--ki',
'--keyframe-interval',
dest='keyframe_interval',
type=int,
help='Streaming profile config keyframe interval',
)
parser.add_argument(
'--remove',
'--remove-service',
dest='remove_service',
action='store_true',
default=False,
help='Remove streaming service',
)
parser.add_argument(
'--remove-server',
dest='remove_server',
action='store_true',
default=False,
help='Remove streaming server from a service',
)
parser.add_argument(
'--remove-profile',
dest='remove_profile',
action='store_true',
default=False,
help='Remove streaming profile from a service',
)
parser.add_argument(
'--remove-config',
dest='remove_config',
action='store_true',
default=False,
help='Remove streaming profile config from a profile',
)
parser.add_argument(
'-n',
'--dry-run',
dest='dry_run',
action='store_true',
default=False,
help='Show changes that would be made',
)
ns = parser.parse_args(args or None)
if ns.server_name and not (ns.server_url or ns.remove_server):
parser.error('The --server-name option requires either --server-url or --remove-server')
if ns.remove_server and not ns.server_name:
parser.error('The --remove-server option requires --server-name')
if ns.remove_profile and not ns.profile_name:
parser.error('The --remove-profile option requires --profile-name')
if ns.profile_config and not ns.profile_name:
parser.error('The --profile-config option requires --profile-name')
if ns.remove_config and not ns.profile_config:
parser.error('The --remove-config option requires --profile-config')
kwargs = dict(vars(ns), parser=parser)
update_streaming_xml(**kwargs)
if __name__ == '__main__':
main() | ATEMStreamingXML | /ATEMStreamingXML-0.1.4.tar.gz/ATEMStreamingXML-0.1.4/ATEMStreamingXML.py | ATEMStreamingXML.py |
import argparse
import os
from banner import show_banner
from decryption import decrypt
from encryption import encrypt
def encrypt_file(file_path: str, password: str, extension: str):
with open(file_path, "r", encoding='utf-8') as k:
message = k.read()
encrypted, salt = encrypt(message, password)
encrypted_file_path = os.path.splitext(file_path)[0] + extension
    with open(encrypted_file_path, 'wb') as e:
e.write(salt)
e.write(encrypted)
print(
f'[*] File Encryption Completed\n[*] File name: {encrypted_file_path}')
def decrypt_file(file_path: str, password: str, extension: str):
    with open(file_path, 'rb') as d:
salt = d.read(16)
encrypted_message = d.read()
try:
decrypted = decrypt(encrypted_message, password, salt)
        decrypted_file_path = file_path + extension
        with open(decrypted_file_path, 'w', encoding='utf-8') as t:
            t.write(decrypted)
        print(f'File decrypted and saved as {decrypted_file_path}')
    except Exception:
print("Invalid Password")
def main():
parser = argparse.ArgumentParser(
description="ATENIGMA Basic File encryption and decryption with password")
parser.add_argument('--file',
help='Path to the file to be processed')
parser.add_argument('--key', help='Give your password')
parser.add_argument('--encrypt', action='store_true',
help='Encrypt the file or data')
parser.add_argument('--decrypt', action='store_true',
help='Decrypt the file or data')
parser.add_argument('--showbanner', action='store_true',
help='Show the banner of the tool')
parser.add_argument('--exten', default='.enc',
help='File extension for encrypted/decrypted files')
args = parser.parse_args()
file_path = args.file
password = args.key
if args.encrypt:
encrypt_file(file_path, password, args.exten)
elif args.decrypt:
decrypt_file(file_path, password, args.exten)
elif args.showbanner:
show_banner()
else:
print("[!] No action selected (encrypt or decrypt)")
if __name__ == '__main__':
main() | ATENIGMA | /ATENIGMA-0.1.4-py3-none-any.whl/atenigma/main.py | main.py |
textCPS = 2
import time
from inspect import signature
def slowprint(text, cps = textCPS, color = "white") :
black = "\033[0;30m"
purple = "\033[0;35m"
blue = "\033[0;34m"
green = "\033[0;32m"
red = "\033[0;31m"
yellow = "\033[0;33m"
white = "\033[0;37m"
cps = 1/cps
if color == "white" :
print(white)
if color == "blue" :
print(blue)
if color == "black" :
print(black)
if color == "purple" :
print(purple)
if color == "green" :
print(green)
if color == "red" :
print(red)
if color == "yellow" :
print(yellow)
for i in text:
print(i, end="")
time.sleep(cps)
def chngCPS(newCPS):
global textCPS
textCPS = newCPS
class Object():
def __init__(self, name):
self.name = name
self.value = name
class WeaponSource(Object):
def __init__(self):
pass
class Weapon(Object):
def __init__(self, name, dmg, owner, exhaustible = False, source = None):
self.name = name
self.value = name
        self.damage = dmg
self.owner = owner
self.exhaustible = exhaustible
if self.exhaustible:
self.source = source
def destroy(self):
slowprint(self.value + " got destroyed", color = "green")
def addDamage(self, dmg):
self.damage += dmg
def subDamage(self, dmg):
self.damage -= dmg
def chngDamage(self, dmg):
self.damage = dmg
def use(self, attacker, prey):
if self.exhaustible:
if not attacker.findInv(self.source):
slowprint("No source available for " + self.value, color = "red")
class Enemy():
def __init__(self, name, dmg, health, enType = ""):
self.name = name
self.value = name
self.damage = dmg
self.health = health
if enType == "":
self.enType = name
else:
self.enType = enType
def addHealth(self, health):
self.health += health
def subHealth(self, health):
self.health -= health
def chngHealth(self, health):
self.health = health
def addDamage(self, dmg):
self.damage += dmg
def subDamage(self, dmg):
self.damage -= dmg
def chngDamage(self, dmg):
self.damage = dmg
def kill(self):
slowprint(self.name + " was killed.", color = "green")
del(self)
def killPlyr(self, plyr):
        slowprint(plyr.name + " was killed by " + self.enType + ".", textCPS, color = "red")
plyr.health = 0
class Room():
def __init__(self, enemies, teleports, events):
self.enemies = enemies
self.teleports = teleports
self.events = events
def runEvent(self, index, parameters):
sig = signature(self.events[index])
params = sig.parameters
paramlen = len(params)
while len(parameters) > paramlen:
parameters.pop()
self.events[index](*parameters)
def teleport(self, index, newRoomVar):
newRoomVar = self.teleports[index]
return
class Inventory():
    def __init__(self, limit):
        self.limit = limit
        self.inv = []
def addInv(self, obj):
if self.limit > len(self.inv):
self.inv.append(obj.value)
else:
slowprint("Inventory is full", 5, "red")
def delInv(self, obj, plyr):
for i in range(0, len(self.inv)):
if self.inv[i] == obj.value:
self.inv.pop(i)
return
slowprint("Object '" + obj.value + "' doesn't exist in the inventory of " + plyr.name + ".", 20, "red")
class Player():
def __init__(self, name, health, willpower):
self.name = name
self.health = health
self.willpower = willpower
self.invMake()
def chngHealth(self, newHealth):
self.health = newHealth
def addHealth(self, health):
self.health += health
def subHealth(self, health):
self.health -= health
    def chngName(self, newName):
self.name = newName
def addWill(self, will):
self.willpower += will
def subWill(self, will):
self.willpower -= will
def invMake(self, starters = []) :
self.inventory = Inventory(10)
self.inv = self.inventory.inv
def retInv(self):
return self.inventory.inv
def addInv(self, obj):
self.inventory.addInv(obj = obj)
self.inv = self.inventory.inv
def delInv(self, obj):
self.inventory.delInv(obj = obj, plyr = self)
        self.inv = self.inventory.inv
def findInv(self, obj):
j = 0
        for i in self.inv:
            if i == obj.value:
                return j
j+=1
return False
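# Illustrative usage sketch (names and numbers are made up, not part of the engine itself):
#   hero = Player("Arthur", health = 100, willpower = 10)
#   hero.addInv(Object("Sword"))
#   goblin = Enemy("Goblin", dmg = 5, health = 20)
#   dialogue(hero, "Who goes there?")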
def dialogue(plyr, text, color = "white"):
global textCPS
refText = plyr.name + " : " + text
refText = refText.upper()
slowprint(refText, textCPS, color)
def narrate(text, expression = "statement"):
global textCPS
if expression == "statement":
slowprint(text, textCPS, "white")
elif expression == "expression":
slowprint("*" + text + "*", textCPS, color = "purple")
def ynQuestion(question, assumption = False, assumptionText = ""):
global textCPS
narrate(question + "[y/n] : ")
    answer = input().lower()
    if answer not in ["yes", "y", "n", "no"]:
        if assumption == False:
            slowprint("Invalid option!", color = "red")
            return ynQuestion(question = question, assumption = assumption, assumptionText = assumptionText)
        slowprint(assumptionText)
        return "assumption"
if answer in ['yes', 'y']:
return True
return False
def multiChoiceQ(question, options, assumption = False, assumptionText = ""):
global textCPS
narrate(question + '\n')
for i in range(0, len(options)):
slowprint(" [" + str(i + 1) + "]. " + options[i], textCPS, color = "yellow")
print('\n' + "\033[0;37m" + " Answer : ", end="")
optionsLower = []
for i in range(0, len(options)):
optionsLower.append(options[i].lower())
answer = input()
try:
answer = int(answer)
except:
pass
try:
assumptions = 0 < answer <= len(options)
except:
assumptions = answer in optionsLower
if not assumptions:
assumptions = answer in options
if not assumptions:
if assumption == False:
slowprint("Invalid option!", color = "red")
return multiChoiceQ(question = question, options = options)
slowprint(assumptionText)
return "assumption"
    for i in range(0, len(options)):
        if isinstance(answer, int):
            if answer == i + 1:
                return i + 1
        elif answer.lower() in optionsLower[i]:
            return i + 1
def ndefstrQ(question):
global textCPS
narrate(question + " : ")
answer = input()
return answer.lower() | ATG-Engine-Test | /ATG%20Engine%20Test-1.0.0.tar.gz/ATG Engine Test-1.0.0/atg/main.py | main.py |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from copy import deepcopy
from atlas_object.lightcurve_class import lightcurves
class atlas_object(object):
"""Class that represents an object with ATLAS photometry.
"""
def __init__(self, lc_file):
"""
Parameters
----------
lc_file: str
Light-curve CSV file with ATLAS photometry.
"""
self.lc_file = lc_file
lc_df = pd.read_csv(lc_file)
self.lc_df = lc_df
self.lcs = lightcurves(lc_df)
self.init_lcs = deepcopy(self.lcs)
self.bands = lc_df.F.unique()
self.modified = False
def __repr__(self):
rep = (f'LC file: {self.lc_file}')
return rep
def __getitem__(self, item):
return getattr(self, item)
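    # Illustrative usage sketch (the CSV name is a placeholder, not from the package):
    #   obj = atlas_object('atlas_forced_photometry.csv')
    #   obj.sigma_clip(niter=-5, n_sigma=3)    # clip until no more points are removed
    #   obj.rolling(window=1.0, center=True)   # 1-day weighted rolling mean
    #   obj.plot_lcs()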
def plot_original(self, xmin=None, xmax=None):
"""Plots the initial light curves.
Parameters
----------
xmin: float, default 'None'
Minimum x-axis range.
xmax: float, default 'None'
Maximum x-axis range.
"""
mags = np.empty(0)
fig, ax = plt.subplots(figsize=(8, 6))
for filt in self.bands:
init_lcs = self.init_lcs[filt]
ax.errorbar(init_lcs.time, init_lcs.mag, init_lcs.mag_err,
fmt='o', label=filt, c=init_lcs.color, mec='k')
mags = np.r_[mags, init_lcs.mag]
        ax.set_ylabel('Apparent Magnitude', fontsize=18)
ax.set_xlabel('MJD', fontsize=18)
ax.tick_params(labelsize=18)
ax.set_ylim(mags.min() - 0.5, mags.max() + 0.5)
ax.set_xlim(xmin, xmax)
ax.invert_yaxis()
ax.legend(fontsize=18)
plt.show()
def plot_lcs(self, xmin=None, xmax=None):
"""Plots the current state of the light curves.
Parameters
----------
xmin: float, default 'None'
Minimum x-axis range.
xmax: float, default 'None'
Maximum x-axis range.
"""
mags = np.empty(0)
fig, ax = plt.subplots(figsize=(8, 6))
for filt in self.bands:
if self.modified:
lcs = self.lcs[filt]
ax.errorbar(lcs.time, lcs.mag, lcs.mag_err,
fmt='*', c=lcs.color)
alpha = 0.2
else:
alpha = 1
init_lcs = self.init_lcs[filt]
ax.errorbar(init_lcs.time, init_lcs.mag, init_lcs.mag_err,
fmt='o', label=filt, c=init_lcs.color, mec='k',
alpha=alpha)
mags = np.r_[mags, init_lcs.mag]
        ax.set_ylabel('Apparent Magnitude', fontsize=18)
ax.set_xlabel('MJD', fontsize=18)
ax.tick_params(labelsize=18)
ax.set_ylim(mags.min() - 0.5, mags.max() + 0.5)
ax.set_xlim(xmin, xmax)
ax.invert_yaxis()
ax.legend(fontsize=18)
plt.show()
def rolling(self, window, center=False,
sigma_clip=False, **sigclip_kwargs):
"""Weighted rolling mean function.
Parameters
----------
window: float
Time window in units of days.
center: bool, default 'False'
If 'False', set the window labels as the right
edge of the window index. If 'True', set the window
labels as the center of the window index.
sigma_clip: bool, default 'False'
If 'True', sigma clipping is performed within rolling
windows.
sigclip_kwargs: dict
Input parameters for the sigma clipping. See 'sigma_clip()'.
"""
color_dict = {'c': 'blue', 'o': 'red'}
for filt in self.bands:
self.lcs[filt].rolling(window, center,
sigma_clip,
**sigclip_kwargs)
self.lcs[filt].color = color_dict[filt]
self.modified = True
    def sigma_clip(self, niter=1, n_sigma=3, use_median=False):
"""Performs sigma clipping.
Parameters
----------
niter: int, default ``1``
The number of sigma-clipping iterations to perform.
If niter is negative, iterations will continue until no more
clipping occurs or until abs('niter') is reached, whichever
is reached first.
n_sigma: float, default '3'
Number of standard deviations used.
use_median: bool, default 'False':
If 'True', use median of data instead of mean.
"""
color_dict = {'c': 'blue', 'o': 'red'}
for filt in self.bands:
self.lcs[filt].sigma_clip(niter, n_sigma, use_median)
self.lcs[filt].color = color_dict[filt]
self.modified = True | ATLAS-Object | /ATLAS_Object-0.1.0-py3-none-any.whl/atlas_object/atlas_class.py | atlas_class.py |
import numpy as np
def clip(data, mean, sigma):
"""Performs sigma clipping of data around mean.
Parameters
----------
data numpy.ndarray:
Array of values.
mean: float
Value around which to clip (does not have to be the mean).
sigma: float
Sigma-value for clipping.
Returns
-------
indices: numpy.ndarray
Indices of non-clipped data.
"""
ilow = data >= mean - sigma
ihigh = data <= mean + sigma
indices = np.logical_and(ilow, ihigh)
return indices
def calc_sigma(data, errors=None):
"""Calculates the weighted standard deviation.
Parameters
----------
data: numpy.ndarray
Data to be averaged.
errors: numpy.ndarray, default 'None'
Errors for the data. If 'None', unweighted
values are calculated.
Returns
-------
wmean: numpy.ndarray
Weighted mean.
wsigma: numpy.ndarray
Weighted standard deviation.
"""
if errors is None:
        w = np.ones_like(data)
else:
w = 1.0 / errors ** 2
wmean = np.average(data, weights=w)
V1 = w.sum()
V2 = (w ** 2).sum()
# weighted sample variance
wsigma = np.sqrt(((data - wmean) * (data - wmean) * w).sum() *
(V1 / (V1 * V1 - V2)))
return wmean, wsigma
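# Illustrative example for weighted_sigmaclip below (values are made up):
#   import numpy as np
#   flux = np.random.normal(10.0, 0.1, 500)   # well-behaved measurements
#   flux[100] = 50.0                          # inject a single outlier
#   errors = np.full(flux.shape, 0.1)
#   keep, n_iter = weighted_sigmaclip(flux, errors, niter=-5, n_sigma=3)
#   clipped_flux = flux[keep]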
def weighted_sigmaclip(data, errors=None, niter=1, n_sigma=3,
use_median=False):
"""Remove outliers from data which lie more than n_sigma
standard deviations from mean.
Parameters
----------
data: numpy.ndarray
Array containing data values.
errors: numpy.ndarray, default 'None'
Errors associated with the data. If 'None', unweighted mean
and standard deviation are used in calculations.
niter: int, default '1'
Number of iterations to calculate mean and standard
deviation, and reject outliers, If niter is negative,
iterations will continue until no more clipping occurs or
until abs('niter') is reached, whichever is reached first.
n_sigma: float, default '3'
Number of standard deviations used for sigma clipping.
use_median: bool, default 'False':
If 'True', use median of data instead of mean.
Returns
-------
indices: boolan numpy.array
Boolean numpy array of indices indicating which
elements are clipped (False), with the same shape as the
input
i: int
Number of iterations
"""
# indices keeps track which data should be discarded
indices = np.ones(len(data.ravel()),
        dtype=bool).reshape(data.shape)
if niter < 0:
nniter = -niter
else:
nniter = niter
for i in range(nniter):
newdata = data[indices]
if errors is None:
newerrors = None
else:
newerrors = errors[indices]
N = len(newdata)
if N < 2:
# no data left to clip
return indices, i
mean, sigma = calc_sigma(newdata, newerrors)
if use_median:
mean = np.median(newdata)
newindices = clip(data, mean, n_sigma * sigma)
if niter < 0:
# break when no changes
if (newindices == indices).all():
break
indices = newindices
return indices, i + 1 | ATLAS-Object | /ATLAS_Object-0.1.0-py3-none-any.whl/atlas_object/sigma_clipping.py | sigma_clipping.py |
import numpy as np
import pandas as pd
from atlas_object.utils import flux2mag, mag2flux
from atlas_object.rolling import weighted_rolling
from atlas_object.sigma_clipping import weighted_sigmaclip
class lightcurve(object):
"""Light curve class.
"""
def __init__(self, band, lcs_df):
"""
Parameters
----------
band: str
ATLAS band (o or c).
lcs_df: DataFrame
ATLAS forced photometry.
"""
self.band = band
data = lcs_df[lcs_df.F == band]
self.time = data.MJD.values
self.flux = data.uJy.values
self.flux_err = data.duJy.values
self.mag = data.m.values
self.mag_err = data.dm.values
color_dict = {'c': 'cyan', 'o': 'orange'}
self.color = color_dict[band]
self.snr = data.uJy.values / data.duJy.values
self.zp = 23.9
def __repr__(self):
return f'band: {self.band}'
def __getitem__(self, item):
return getattr(self, item)
def mask_lc(self, mask):
"""Masks the light curve with the given mask.
Parameters
----------
mask: booleans
Mask with the same length as the light curves.
"""
self.time = self.time.copy()[mask]
self.flux = self.flux.copy()[mask]
self.flux_err = self.flux_err.copy()[mask]
self.mag = self.mag.copy()[mask]
self.mag_err = self.mag_err.copy()[mask]
self.snr = self.snr.copy()[mask]
def sigma_clip(self, niter=1, n_sigma=3, use_median=False):
"""Performs sigma clipping.
Parameters
----------
niter: int, default ``1``
The number of sigma-clipping iterations to perform.
If niter is negative, iterations will continue until no more
clipping occurs or until abs('niter') is reached, whichever
is reached first.
n_sigma: float, default '3'
Number of standard deviations used.
use_median: bool, default 'False':
If 'True', use median of data instead of mean.
"""
indices, iter_val = weighted_sigmaclip(self.flux, self.flux_err,
niter, n_sigma, use_median)
self.time = self.time[indices]
self.flux = self.flux[indices]
self.flux_err = self.flux_err[indices]
self.mag = self.mag[indices]
self.mag_err = self.mag_err[indices]
self.indices = indices
self.iter = iter_val
def rolling(self, window, center=False,
sigma_clip=False, **sigclip_kwargs):
"""Weighted rolling mean function.
Parameters
----------
window: float
Time window in units of days.
center: bool, default 'False'
If 'False', set the window labels as the right
edge of the window index. If 'True', set the window
labels as the center of the window index.
sigma_clip: bool, default 'False'
If 'True', sigma clipping is performed within rolling
windows.
sigclip_kwargs: dict
Input parameters for the sigma clipping. See 'sigma_clip()'.
"""
x, y, yerr, inds = weighted_rolling(self.time, self.flux,
self.flux_err, window, center,
sigma_clip, **sigclip_kwargs)
self.time = x
self.flux = y
self.flux_err = yerr
self.mag, self.mag_err = flux2mag(y, self.zp, yerr)
self.indices = inds
class lightcurves(object):
"""Multi-colour light curves class.
"""
def __init__(self, lcs_df):
"""
Parameters
----------
lcs_df: DataFrame
ATLAS forced photometry.
"""
self.bands = lcs_df.F.unique()
for band in self.bands:
lc = lightcurve(band, lcs_df)
setattr(self, band, lc)
def __repr__(self):
return str(self.bands)
def __getitem__(self, item):
return getattr(self, item) | ATLAS-Object | /ATLAS_Object-0.1.0-py3-none-any.whl/atlas_object/lightcurve_class.py | lightcurve_class.py |
import numpy as np
from atlas_object.sigma_clipping import weighted_sigmaclip
def weighted_rolling(x_data, y_data, yerr_data=None,
window=3, center=False,
sigma_clip=False, **sigclip_kwargs):
"""Weighted rolling functions, similar to pandas
rolling function.
Parameters
----------
x_data: array
X-axis data.
y_data: array
Y-axis data.
yerr_data: array
Y-axis error.
window: float
Time window in the same units as 'x'.
center: bool, default 'False'
If 'False', set the window labels as the right
edge of the window index. If 'True', set the window
        labels as the center of the window index.
    sigma_clip: bool, default 'False'
        If 'True', sigma clipping is performed within rolling
        windows.
    sigclip_kwargs: dict
        Input parameters for the sigma clipping. See 'weighted_sigmaclip()'.
Returns
-------
4-tuple with rolling data ('x', 'y' and 'yerr' arrays) and
indices of the data removed by the sigma clipping
"""
rolling_dict = {'x': [], 'y': [], 'yerr': []}
if yerr_data is None:
yerr_data = np.ones_like(y_data)
x_used = np.empty(0)
for i, x in enumerate(x_data):
# window type
if center == True:
roll_x = x_data.copy()
roll_y = y_data.copy()
roll_yerr = yerr_data.copy()
mask = np.abs(x - roll_x) <= window / 2
else:
roll_x = x_data[:i + 1].copy()
roll_y = y_data[:i + 1].copy()
roll_yerr = yerr_data[:i + 1].copy()
mask = x - roll_x <= window
roll_x = roll_x[mask]
roll_y = roll_y[mask]
roll_yerr = roll_yerr[mask]
# if only one or no data point is left,
# no need to do anything else
if len(roll_x) == 0:
continue
elif len(roll_x) == 1:
rolling_dict['x'].append(roll_x[0])
rolling_dict['y'].append(roll_y[0])
rolling_dict['yerr'].append(roll_yerr[0])
continue
# sigma clipping within rolling segments
if sigma_clip:
if 'errors' in sigclip_kwargs.keys():
errors = sigclip_kwargs['errors']
sigclip_kwargs.pop('errors')
else:
                errors = roll_yerr
mask, n_iter = weighted_sigmaclip(roll_y,
errors,
**sigclip_kwargs)
roll_x = roll_x[mask]
roll_y = roll_y[mask]
roll_yerr = roll_yerr[mask]
if len(roll_x) == 0:
continue
# keep track of the values being used
x_used = np.r_[x_used, roll_x]
# calculate weighted mean and error propagation
# x-axis
rolling_dict['x'].append(roll_x.mean())
# y-axis
w = 1 / roll_yerr ** 2
wmean = np.average(roll_y, weights=w)
rolling_dict['y'].append(wmean)
# y-error: standard deviation of the weighted mean
wstd = np.sqrt(1 / np.sum(w))
rolling_dict['yerr'].append(wstd)
# turn lists into arrays
for key, values in rolling_dict.items():
rolling_dict[key] = np.array(rolling_dict[key])
# values used
indices = np.array([True if x in x_used else False for x in x_data])
return rolling_dict['x'], rolling_dict['y'], rolling_dict['yerr'], indices | ATLAS-Object | /ATLAS_Object-0.1.0-py3-none-any.whl/atlas_object/rolling.py | rolling.py |
======================
read-ATM1b-QFIT-binary
======================
|Language|
|License|
|PyPI Version|
|Documentation Status|
.. |Language| image:: https://img.shields.io/pypi/pyversions/ATM1b-QFIT?color=green
:target: https://www.python.org/
.. |License| image:: https://img.shields.io/github/license/tsutterley/read-ATM1b-QFIT-binary
:target: https://github.com/tsutterley/read-ATM1b-QFIT-binary/blob/main/LICENSE
.. |PyPI Version| image:: https://img.shields.io/pypi/v/ATM1b-QFIT.svg
:target: https://pypi.python.org/pypi/ATM1b-QFIT/
.. |Documentation Status| image:: https://readthedocs.org/projects/read-atm1b-qfit-binary/badge/?version=latest
:target: https://read-atm1b-qfit-binary.readthedocs.io/en/latest/?badge=latest
Reads Level-1b Airborne Topographic Mapper (ATM) QFIT binary data products
- `IceBridge ATM L1B Qfit Elevation and Return Strength <https://nsidc.org/data/ilatm1b/1>`_
- `IceBridge Narrow Swath ATM L1B Qfit Elevation and Return Strength <https://nsidc.org/data/ilnsa1b/1>`_
- `NSIDC IceBridge Software Tools <https://nsidc.org/data/icebridge/tools.html>`_
- `Python program for retrieving Operation IceBridge data <https://github.com/tsutterley/nsidc-earthdata>`_
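A minimal usage sketch of the Python reader (the granule name below is a placeholder; pass any local ILATM1B/BLATM1B/ILNSA1B ``.qi`` file):

.. code-block:: python

    from ATM1b_QFIT.read_ATM1b_QFIT_binary import read_ATM1b_QFIT_binary

    # read all records and the embedded header text from a QFIT binary file
    ATM_L1b, header = read_ATM1b_QFIT_binary('ILATM1B_20140331_example.qi')
    # elevations above the WGS84 ellipsoid and timestamps as J2000 seconds
    elevation = ATM_L1b['elevation']
    time_J2000 = ATM_L1b['time_J2000']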
Dependencies
############
- `numpy: Scientific Computing Tools For Python <https://numpy.org>`_
- `h5py: Python interface for Hierarchal Data Format 5 (HDF5) <https://www.h5py.org/>`_
- `lxml: processing XML and HTML in Python <https://pypi.python.org/pypi/lxml>`_
- `future: Compatibility layer between Python 2 and Python 3 <https://python-future.org/>`_
Download
########
| The program homepage is:
| https://github.com/tsutterley/read-ATM1b-QFIT-binary
| A zip archive of the latest version is available directly at:
| https://github.com/tsutterley/read-ATM1b-QFIT-binary/archive/main.zip
| Incorporated into the UW-APL pointCollection repository at:
| https://github.com/SmithB/pointCollection
Credits
#######
`Program inspired by the QFIT C reader provided on NSIDC <ftp://sidads.colorado.edu/pub/tools/icebridge/qfit/c/>`_
Disclaimer
##########
This project contains work and contributions from the `scientific community <./CONTRIBUTORS.rst>`_.
This program is not sponsored or maintained by the Universities Space Research Association (USRA) or NASA.
It is provided here for your convenience but *with no guarantees whatsoever*.
License
#######
The content of this project is licensed under the
`Creative Commons Attribution 4.0 Attribution license <https://creativecommons.org/licenses/by/4.0/>`_
and the source code is licensed under the `MIT license <LICENSE>`_.
| ATM1b-QFIT | /ATM1b-QFIT-1.0.1.tar.gz/ATM1b-QFIT-1.0.1/README.rst | README.rst |
u"""
time.py
Written by Tyler Sutterley (05/2022)
Utilities for calculating time operations
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
lxml: processing XML and HTML in Python
https://pypi.python.org/pypi/lxml
PROGRAM DEPENDENCIES:
utilities.py: download and management utilities for syncing files
UPDATE HISTORY:
Updated 05/2022: changed keyword arguments to camel case
Updated 04/2022: updated docstrings to numpy documentation format
Updated 04/2021: updated NIST ftp server url for leap-seconds.list
Updated 03/2021: replaced numpy bool/int to prevent deprecation warnings
Updated 02/2021: NASA CDDIS anonymous ftp access discontinued
Updated 01/2021: added ftp connection checks
add date parser for cases when only a calendar date with no units
Updated 12/2020: merged with convert_julian and convert_calendar_decimal
added calendar_days routine to get number of days per month
Updated 09/2020: added wrapper function for merging Bulletin-A files
can parse date strings in form "time-units since yyyy-mm-dd hh:mm:ss"
Updated 08/2020: added NASA Earthdata routines for downloading from CDDIS
Written 07/2020
"""
import re
import datetime
import numpy as np
import ATM1b_QFIT.utilities
#-- PURPOSE: convert times from seconds since epoch1 to time since epoch2
def convert_delta_time(delta_time, epoch1=None, epoch2=None, scale=1.0):
"""
Convert delta time from seconds since ``epoch1`` to time since ``epoch2``
Parameters
----------
delta_time: float
seconds since epoch1
epoch1: tuple or NoneType, default None
epoch for input delta_time
epoch2: tuple or NoneType, default None
epoch for output delta_time
scale: float, default 1.0
scaling factor for converting time to output units
"""
epoch1 = datetime.datetime(*epoch1)
epoch2 = datetime.datetime(*epoch2)
delta_time_epochs = (epoch2 - epoch1).total_seconds()
#-- subtract difference in time and rescale to output units
return scale*(delta_time - delta_time_epochs)
#-- PURPOSE: calculate the delta time from calendar date
#-- http://scienceworld.wolfram.com/astronomy/JulianDate.html
def convert_calendar_dates(year, month, day, hour=0.0, minute=0.0, second=0.0,
epoch=(1992,1,1,0,0,0), scale=1.0):
"""
Calculate the time in time units since ``epoch`` from calendar dates
Parameters
----------
year: float
calendar year
month: float
month of the year
day: float
day of the month
hour: float, default 0.0
hour of the day
minute: float, default 0.0
minute of the hour
second: float, default 0.0
second of the minute
epoch: tuple, default (1992,1,1,0,0,0)
epoch for output delta_time
scale: float, default 1.0
scaling factor for converting time to output units
Returns
-------
delta_time: float
days since epoch
"""
#-- calculate date in Modified Julian Days (MJD) from calendar date
#-- MJD: days since November 17, 1858 (1858-11-17T00:00:00)
MJD = 367.0*year - np.floor(7.0*(year + np.floor((month+9.0)/12.0))/4.0) - \
np.floor(3.0*(np.floor((year + (month - 9.0)/7.0)/100.0) + 1.0)/4.0) + \
np.floor(275.0*month/9.0) + day + hour/24.0 + minute/1440.0 + \
second/86400.0 + 1721028.5 - 2400000.5
epoch1 = datetime.datetime(1858,11,17,0,0,0)
epoch2 = datetime.datetime(*epoch)
delta_time_epochs = (epoch2 - epoch1).total_seconds()
#-- return the date in days since epoch
return scale*np.array(MJD - delta_time_epochs/86400.0,dtype=np.float64)
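#-- illustrative example (dates chosen arbitrarily, not from the source):
#-- days since the default 1992-01-01 epoch for 2018-06-15 00:00:00 UTC
#--     convert_calendar_dates(2018, 6, 15)
#-- days since the GPS epoch (1980-01-06) for the same date
#--     convert_calendar_dates(2018, 6, 15, epoch=(1980,1,6,0,0,0))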
#-- PURPOSE: Count number of leap seconds that have passed for each GPS time
def count_leap_seconds(GPS_Time, truncate=True):
"""
Counts the number of leap seconds between a given GPS time and UTC
Parameters
----------
GPS_Time: float
seconds since January 6, 1980 at 00:00:00
truncate: bool, default True
Reduce list of leap seconds to positive GPS times
Returns
-------
n_leaps: float
number of elapsed leap seconds
"""
#-- get the valid leap seconds
leaps = get_leap_seconds(truncate=truncate)
#-- number of leap seconds prior to GPS_Time
n_leaps = np.zeros_like(GPS_Time,dtype=np.float64)
for i,leap in enumerate(leaps):
count = np.count_nonzero(GPS_Time >= leap)
if (count > 0):
indices = np.nonzero(GPS_Time >= leap)
n_leaps[indices] += 1.0
#-- return the number of leap seconds for converting to UTC
return n_leaps
#-- PURPOSE: Define GPS leap seconds
def get_leap_seconds(truncate=True):
"""
Gets a list of GPS times for when leap seconds occurred
Parameters
----------
truncate: bool, default True
Reduce list of leap seconds to positive GPS times
Returns
-------
GPS time: float
GPS seconds when leap seconds occurred
"""
leap_secs = ATM1b_QFIT.utilities.get_data_path(['data','leap-seconds.list'])
#-- find line with file expiration as delta time
with open(leap_secs,'r') as fid:
secs, = [re.findall(r'\d+',i).pop() for i in fid.read().splitlines()
if re.match(r'^(?=#@)',i)]
#-- check that leap seconds file is still valid
expiry = datetime.datetime(1900,1,1) + datetime.timedelta(seconds=int(secs))
today = datetime.datetime.now()
update_leap_seconds() if (expiry < today) else None
#-- get leap seconds
leap_UTC,TAI_UTC = np.loadtxt(ATM1b_QFIT.utilities.get_data_path(leap_secs)).T
#-- TAI time is ahead of GPS by 19 seconds
TAI_GPS = 19.0
#-- convert leap second epochs from NTP to GPS
#-- convert from time of 2nd leap second to time of 1st leap second
leap_GPS = convert_delta_time(leap_UTC+TAI_UTC-TAI_GPS-1,
epoch1=(1900,1,1,0,0,0), epoch2=(1980,1,6,0,0,0))
#-- return the GPS times of leap second occurance
if truncate:
return leap_GPS[leap_GPS >= 0].astype(np.float64)
else:
return leap_GPS.astype(np.float64)
#-- PURPOSE: connects to servers and downloads leap second files
def update_leap_seconds(timeout=20, verbose=False, mode=0o775):
"""
Connects to servers to download leap-seconds.list files from NIST servers
- https://www.nist.gov/pml/time-and-frequency-division/leap-seconds-faqs
Servers and Mirrors
- ftp://ftp.nist.gov/pub/time/leap-seconds.list
- https://www.ietf.org/timezones/data/leap-seconds.list
Parameters
----------
timeout: int, default 20
timeout in seconds for blocking operations
verbose: bool, default False
print file information about output file
mode: oct, default 0o775
permissions mode of output file
"""
#-- local version of file
FILE = 'leap-seconds.list'
LOCAL = ATM1b_QFIT.utilities.get_data_path(['data',FILE])
HASH = ATM1b_QFIT.utilities.get_hash(LOCAL)
#-- try downloading from NIST ftp servers
HOST = ['ftp.nist.gov','pub','time',FILE]
try:
ATM1b_QFIT.utilities.check_ftp_connection(HOST[0])
ATM1b_QFIT.utilities.from_ftp(HOST, timeout=timeout, local=LOCAL,
hash=HASH, verbose=verbose, mode=mode)
except:
pass
else:
return
#-- try downloading from Internet Engineering Task Force (IETF) mirror
REMOTE = ['https://www.ietf.org','timezones','data',FILE]
try:
ATM1b_QFIT.utilities.from_http(REMOTE, timeout=timeout, local=LOCAL,
hash=HASH, verbose=verbose, mode=mode)
except:
pass
else:
return | ATM1b-QFIT | /ATM1b-QFIT-1.0.1.tar.gz/ATM1b-QFIT-1.0.1/ATM1b_QFIT/time.py | time.py |
u"""
read_ATM1b_QFIT_binary.py
Written by Tyler Sutterley (04/2021)
Reads Level-1b Airborne Topographic Mapper (ATM) QFIT binary data products
http://nsidc.org/data/docs/daac/icebridge/ilatm1b/docs/ReadMe.qfit.txt
Can be the following ATM QFIT file types:
ILATM1B: Airborne Topographic Mapper QFIT Elevation
BLATM1B: Pre-Icebridge Airborne Topographic Mapper QFIT Elevation
ILNSA1B: Narrow Swath Airborne Topographic Mapper QFIT Elevation
Based on the QFIT C reader provided on NSIDC
ftp://sidads.colorado.edu/pub/tools/icebridge/qfit/c/
INPUTS:
full_filename: full path to ATM QFIT .qi file (can have tilde-prefix)
OPTIONS:
SUBSETTER: subset dataset to specific indices
OUTPUTS:
Data variables for the given input .qi file format listed below
outputs are scaled from the inputs listed in the ReadMe.qfit.txt file
10-word format (used prior to 2006):
time: Relative Time (seconds from start of data file)
latitude: Laser Spot Latitude (degrees)
longitude: Laser Spot Longitude (degrees)
elevation: Elevation above WGS84 ellipsoid (meters)
xmt_sigstr: Start Pulse Signal Strength (relative)
rcv_sigstr: Reflected Laser Signal Strength (relative)
azimuth: Scan Azimuth (degrees)
pitch: Pitch (degrees)
roll: Roll (degrees)
time_hhmmss: GPS Time packed (example: 153320.1000 = 15h 33m 20.1s)
time_J2000: Time converted to seconds since 2000-01-01 12:00:00 UTC
12-word format (in use since 2006):
time: Relative Time (seconds from start of data file)
latitude: Laser Spot Latitude (degrees)
longitude: Laser Spot Longitude (degrees)
elevation: Elevation above WGS84 ellipsoid (meters)
xmt_sigstr: Start Pulse Signal Strength (relative)
rcv_sigstr: Reflected Laser Signal Strength (relative)
azimuth: Scan Azimuth (degrees)
pitch: Pitch (degrees)
roll: Roll (degrees)
gps_pdop: GPS PDOP (dilution of precision)
pulse_width: Laser received pulse width (digitizer samples)
time_hhmmss: GPS Time packed (example: 153320.1000 = 15h 33m 20.1s)
time_J2000: Time converted to seconds since 2000-01-01 12:00:00 UTC
14-word format (used in some surveys between 1997 and 2004):
time: Relative Time (seconds from start of data file)
latitude: Laser Spot Latitude (degrees)
longitude: Laser Spot Longitude (degrees)
elevation: Elevation above WGS84 ellipsoid (meters)
xmt_sigstr: Start Pulse Signal Strength (relative)
rcv_sigstr: Reflected Laser Signal Strength (relative)
azimuth: Scan Azimuth (degrees)
pitch: Pitch (degrees)
roll: Roll (degrees)
passive_sig: Passive Signal (relative)
pass_foot_lat: Passive Footprint Latitude (degrees)
pass_foot_long: Passive Footprint Longitude (degrees)
pass_foot_synth_elev: Passive Footprint Synthesized Elevation (meters)
time_hhmmss: GPS Time packed (example: 153320.1000 = 15h 33m 20.1s)
time_J2000: Time converted to seconds since 2000-01-01 12:00:00 UTC
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
PROGRAM DEPENDENCIES:
time.py: utilities for calculating time operations
UPDATE HISTORY:
Updated 04/2021: add function docstrings
Updated 02/2020: using python3 division for calculating record counts
using python3 compatible strings for header text
Updated 01/2020: updated regular expression operator for extracting dates
Updated 10/2018: updated GPS time calculation for calculating leap seconds
Updated 01/2018: simplified regex for extracting YYMMSS from filenames
Updated 10/2017: value as integer if big-endian (was outputting as list)
calculate and output time as J2000 in addition to packed hhmmss
Updated 06/2017: read and output ATM QFIT file headers
Written 05/2017
"""
from __future__ import print_function, division
import os
import re
import numpy as np
from ATM1b_QFIT.time import convert_calendar_dates, count_leap_seconds
#-- PURPOSE: get the record length and endianness of the input QFIT file
def get_record_length(fid):
"""
Get the record length and endianness of the QFIT file
Parameters
----------
fid: obj
Open file object for ATM QFIT file
"""
#-- assume initially big endian (all input data 32-bit integers)
dtype = np.dtype('>i4')
value, = np.fromfile(fid, dtype=dtype, count=1)
fid.seek(0)
#-- swap to little endian and reread first line
if (value > 100):
dtype = np.dtype('<i4')
value, = np.fromfile(fid, dtype=dtype, count=1)
fid.seek(0)
#-- get the number of variables
n_blocks = value//dtype.itemsize
#-- read past first record
np.fromfile(fid, dtype=dtype, count=n_blocks)
#-- return the number of variables and the endianness
return (n_blocks, dtype)
#-- PURPOSE: get length and text of ATM1b file headers
def read_ATM1b_QFIT_header(fid, n_blocks, dtype):
"""
Read the ATM QFIT file headers
Parameters
----------
fid: obj
Open file object for ATM QFIT file
n_blocks: int
record length
dtype: str or ob
Endianness of QFIT file
"""
header_count = 0
header_text = b''
value = np.full((n_blocks), -1, dtype=np.int32)
while (value[0] < 0):
#-- read past first record
line = fid.read(n_blocks*dtype.itemsize)
value = np.frombuffer(line, dtype=dtype, count=n_blocks)
header_text += bytes(line[dtype.itemsize:])
header_count += dtype.itemsize*n_blocks
#-- rewind file to previous record
fid.seek(header_count)
#-- remove last record from header text
header_text = header_text[:-dtype.itemsize*n_blocks]
#-- replace empty byte strings and whitespace
header_text = header_text.replace(b'\x00',b'').rstrip()
#-- decode header
return header_count, header_text.decode('utf-8')
#-- PURPOSE: read ATM L1b variables from a QFIT binary file
def read_ATM1b_QFIT_records(fid,n_blocks,n_records,dtype,date,SUBSETTER=None):
"""
Read ATM L1b variables from a QFIT binary file
Parameters
----------
fid: obj
Open file object for ATM QFIT file
n_blocks: int
Record length
n_records: int
Number of records in the QFIT file
dtype: str or obj
Endianness of QFIT file
date: tuple or list
Calendar date in year,month,day format
SUBSETTER: list or NoneType, default None
Subset dataset to specific indices
"""
#-- 10 word format = 0
#-- 12 word format = 1
#-- 14 word format = 2
w = (n_blocks-10)//2
#-- scaling factors for each variable for the 3 word formats (14 max)
scaling_table = [
[1e3, 1e6, 1e6, 1e3, 1, 1, 1e3, 1e3, 1e3, 1e3],
[1e3, 1e6, 1e6, 1e3, 1, 1, 1e3, 1e3, 1e3, 1.0e1, 1, 1e3],
[1e3, 1e6, 1e6, 1e3, 1, 1, 1e3, 1e3, 1e3, 1, 1e6, 1e6, 1e3, 1e3]]
#-- input variable names for the 3 word formats (14 max)
variable_table = []
#-- 10 word format
variable_table.append(['rel_time','latitude','longitude','elevation',
'xmt_sigstr','rcv_sigstr','azimuth','pitch','roll','time_hhmmss'])
#-- 12 word format
variable_table.append(['rel_time','latitude','longitude','elevation',
'xmt_sigstr','rcv_sigstr','azimuth','pitch','roll',
'gps_pdop','pulse_width','time_hhmmss'])
#-- 14 word format
variable_table.append(['rel_time','latitude','longitude','elevation',
'xmt_sigstr','rcv_sigstr','azimuth','pitch','roll','passive_sig',
'pass_foot_lat','pass_foot_long','pass_foot_synth_elev','time_hhmmss'])
#-- input variable data types for the 3 word formats (14 max)
dtype_table = []
#-- 10 word format
dtype_table.append(['f','f','f','f','i','i','f','f','f','f'])
#-- 12 word format
dtype_table.append(['f','f','f','f','i','i','f','f','f','f','i','f'])
#-- 14 word format
dtype_table.append(['f','f','f','f','i','i','f','f','f','i','f','f','f','f'])
#-- dictionary with output variables
ATM_L1b_input = {}
for n,d in zip(variable_table[w],dtype_table[w]):
ATM_L1b_input[n] = np.zeros((n_records), dtype=np.dtype(d))
#-- hour, minute and second from time_hhmmss
hour = np.zeros((n_records),dtype=np.float64)
minute = np.zeros((n_records),dtype=np.float64)
second = np.zeros((n_records),dtype=np.float64)
#-- for each record in the ATM Level-1b file
for r in range(n_records):
#-- set binary to point if using input subsetter
if SUBSETTER is not None:
fid.seek(SUBSETTER[r])
#-- input data record r
i = np.fromfile(fid,dtype=dtype,count=n_blocks)
#-- read variable and scale to output format
for v,n,d,s in zip(i,variable_table[w],dtype_table[w],scaling_table[w]):
ATM_L1b_input[n][r] = v.astype(d)/s
#-- unpack GPS time
time_hhmmss = '{0:010.3f}'.format(ATM_L1b_input['time_hhmmss'][r])
hour[r] = np.float64(time_hhmmss[:2])
minute[r] = np.float64(time_hhmmss[2:4])
second[r] = np.float64(time_hhmmss[4:])
#-- leap seconds for converting from GPS time to UTC
S = calc_GPS_to_UTC(date[0],date[1],date[2],hour,minute,second)
#-- calculation of Julian day
JD = calc_julian_day(date[0],date[1],date[2],hour,minute,second-S)
#-- converting to J2000 seconds
ATM_L1b_input['time_J2000'] = (JD - 2451545.0)*86400.0
#-- return the input data dictionary
return ATM_L1b_input
#-- PURPOSE: calculate the Julian day from calendar date
#-- http://scienceworld.wolfram.com/astronomy/JulianDate.html
def calc_julian_day(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND):
"""
Calculates the Julian day from calendar date
Parameters
----------
YEAR: float or int
Year
MONTH: float or int
Month of the year
DAY: float or int
Day of the month
HOUR: float or int
Hour of the day
MINUTE: float or int
minute of the hour
SECOND: float or int
second of the minute
"""
MJD = convert_calendar_dates(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND,
epoch=(1858,11,17,0,0,0), scale=1.0/86400.0)
return np.array(MJD + 2400000.5, dtype=np.float64)
#-- PURPOSE: calculate the number of leap seconds between GPS time (seconds
#-- since Jan 6, 1980 00:00:00) and UTC
def calc_GPS_to_UTC(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND):
"""
Gets the number of leaps seconds for a calendar date in GPS time
Parameters
----------
YEAR: float or int
Year (GPS)
MONTH: float or int
Month of the year (GPS)
DAY: float or int
Day of the month (GPS)
HOUR: float or int
Hour of the day (GPS)
MINUTE: float or int
minute of the hour (GPS)
SECOND: float or int
second of the minute (GPS)
"""
GPS_Time = convert_calendar_dates(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND,
epoch=(1980,1,6,0,0,0), scale=1.0)
return count_leap_seconds(GPS_Time)
#-- PURPOSE: get shape of ATM Level-1b binary file without reading data
def ATM1b_QFIT_shape(full_filename):
"""
Get the number of records within an ATM Level-1b binary file
Parameters
----------
full_filename: str
Path to ATM QFIT file
"""
#-- read the input file to get file information
fd = os.open(os.path.expanduser(full_filename),os.O_RDONLY)
file_info = os.fstat(fd)
#-- open the filename in binary read mode
fid = os.fdopen(fd, 'rb')
#-- get the number of variables and the endianness of the file
n_blocks,dtype = get_record_length(fid)
MAXARG = 14
#-- check that the number of blocks per record is less than MAXARG
if (n_blocks > MAXARG):
raise Exception('ERROR: Unexpected number of variables')
#-- read over header text
header_count,_ = read_ATM1b_QFIT_header(fid, n_blocks, dtype)
#-- number of records within file
n_records = (file_info.st_size-header_count)//n_blocks//dtype.itemsize
#-- close the input file
fid.close()
#-- return the data shape
return n_records
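#-- illustrative sketch for the reader below (the granule name is a placeholder):
#--     ATM_L1b, header = read_ATM1b_QFIT_binary('ILATM1B_20140331_example.qi',
#--         SUBSETTER=range(100))
#--     reads only the first 100 records of the granule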
#-- PURPOSE: read ATM Level-1b QFIT binary file
def read_ATM1b_QFIT_binary(full_filename, SUBSETTER=None):
"""
Reads an ATM Level-1b binary file
Parameters
----------
full_filename: str
Path to ATM QFIT file
SUBSETTER: list or NoneType, default None
Subset dataset to specific indices
"""
#-- read the input file to get file information
fd = os.open(os.path.expanduser(full_filename),os.O_RDONLY)
file_info = os.fstat(fd)
#-- open the filename in binary read mode
fid = os.fdopen(fd, 'rb')
#-- regular expression pattern for extracting parameters
rx=re.compile((r'(BLATM1B|ILATM1B|ILNSA1B)_'
r'((\d{4})|(\d{2}))(\d{2})(\d{2})'
r'(.*?)\.qi$'),re.VERBOSE)
#-- extract mission and other parameters from filename
match_object = rx.match(os.path.basename(full_filename))
#-- convert year, month and day to float variables
year = np.float64(match_object.group(2))
month = np.float64(match_object.group(5))
day = np.float64(match_object.group(6))
#-- early date strings omitted century and millenia (e.g. 93 for 1993)
if match_object.group(4):
year = (year + 1900.0) if (year >= 90) else (year + 2000.0)
#-- get the number of variables and the endianness of the file
n_blocks,dtype = get_record_length(fid)
MAXARG = 14
#-- check that the number of blocks per record is less than MAXARG
if (n_blocks > MAXARG):
raise Exception('ERROR: Unexpected number of variables')
#-- read over header text
header_count,header_text = read_ATM1b_QFIT_header(fid, n_blocks, dtype)
#-- number of records to read with and without input subsetter
if SUBSETTER is None:
#-- number of records within file (file size - header size)
n_records = (file_info.st_size-header_count)//n_blocks//dtype.itemsize
else:
#-- number of records in subsetter
n_records = len(SUBSETTER)
#-- convert from data point indices into binary variable indices
SUBSETTER = header_count + dtype.itemsize*(np.array(SUBSETTER)*n_blocks)
#-- read input data
ATM_L1b_input = read_ATM1b_QFIT_records(fid, n_blocks, n_records, dtype,
[year, month, day], SUBSETTER=SUBSETTER)
#-- close the input file
fid.close()
#-- return the data and header text
return ATM_L1b_input, header_text | ATM1b-QFIT | /ATM1b-QFIT-1.0.1.tar.gz/ATM1b-QFIT-1.0.1/ATM1b_QFIT/read_ATM1b_QFIT_binary.py | read_ATM1b_QFIT_binary.py |
u"""
utilities.py
Written by Tyler Sutterley (04/2022)
Download and management utilities for syncing time and auxiliary files
PYTHON DEPENDENCIES:
lxml: processing XML and HTML in Python
https://pypi.python.org/pypi/lxml
UPDATE HISTORY:
Updated 04/2022: updated docstrings to numpy documentation format
Updated 10/2021: build python logging instance for handling verbose output
Updated 09/2021: added generic list from Apache http server
Updated 08/2021: added function to open a file path
Updated 07/2021: add parser for converting file files to arguments
Updated 03/2021: added sha1 option for retrieving file hashes
Updated 01/2021: added username and password to ftp functions
added ftp connection check
Updated 12/2020: added file object keyword for downloads if verbose
add url split function for creating url location lists
Updated 11/2020: normalize source and destination paths in copy
make context an optional keyword argument in from_http
Updated 09/2020: copy from http and https to bytesIO object in chunks
use netrc credentials if not entered from CDDIS functions
generalize build opener function for different Earthdata instances
Updated 08/2020: add GSFC CDDIS opener, login and download functions
Written 08/2020
"""
from __future__ import print_function, division
import sys
import os
import re
import io
import ssl
import netrc
import ftplib
import shutil
import base64
import socket
import getpass
import hashlib
import inspect
import logging
import builtins
import posixpath
import subprocess
import lxml.etree
import calendar, time
if sys.version_info[0] == 2:
from urllib import quote_plus
from cookielib import CookieJar
import urllib2
else:
from urllib.parse import quote_plus
from http.cookiejar import CookieJar
import urllib.request as urllib2
#-- PURPOSE: get absolute path within a package from a relative path
def get_data_path(relpath):
"""
Get the absolute path within a package from a relative path
Parameters
----------
relpath: str,
relative path
"""
#-- current file path
filename = inspect.getframeinfo(inspect.currentframe()).filename
filepath = os.path.dirname(os.path.abspath(filename))
if isinstance(relpath,list):
#-- use *splat operator to extract from list
return os.path.join(filepath,*relpath)
elif isinstance(relpath,str):
return os.path.join(filepath,relpath)
#-- PURPOSE: platform independent file opener
def file_opener(filename):
"""
Platform independent file opener
Parameters
----------
filename: str
path to file
"""
if (sys.platform == "win32"):
os.startfile(os.path.expanduser(filename), "explore")
elif (sys.platform == "darwin"):
subprocess.call(["open", os.path.expanduser(filename)])
else:
subprocess.call(["xdg-open", os.path.expanduser(filename)])
#-- PURPOSE: get the hash value of a file
def get_hash(local, algorithm='MD5'):
"""
Get the hash value from a local file or BytesIO object
Parameters
----------
local: obj or str
BytesIO object or path to file
algorithm: str, default 'MD5'
hashing algorithm for checksum validation
- ``'MD5'``: Message Digest
- ``'sha1'``: Secure Hash Algorithm
"""
#-- check if open file object or if local file exists
if isinstance(local, io.IOBase):
if (algorithm == 'MD5'):
return hashlib.md5(local.getvalue()).hexdigest()
elif (algorithm == 'sha1'):
return hashlib.sha1(local.getvalue()).hexdigest()
elif os.access(os.path.expanduser(local),os.F_OK):
#-- generate checksum hash for local file
#-- open the local_file in binary read mode
with open(os.path.expanduser(local), 'rb') as local_buffer:
#-- generate checksum hash for a given type
if (algorithm == 'MD5'):
return hashlib.md5(local_buffer.read()).hexdigest()
elif (algorithm == 'sha1'):
return hashlib.sha1(local_buffer.read()).hexdigest()
else:
return ''
#-- PURPOSE: recursively split a url path
def url_split(s):
"""
Recursively split a url path into a list
Parameters
----------
s: str
url string
"""
head, tail = posixpath.split(s)
if head in ('http:','https:','ftp:','s3:'):
return s,
elif head in ('', posixpath.sep):
return tail,
return url_split(head) + (tail,)
#-- PURPOSE: convert file lines to arguments
def convert_arg_line_to_args(arg_line):
"""
Convert file lines to arguments
Parameters
----------
arg_line: str
line string containing a single argument and/or comments
"""
#-- remove commented lines and after argument comments
for arg in re.sub(r'\#(.*?)$',r'',arg_line).split():
if not arg.strip():
continue
yield arg
#-- PURPOSE: returns the Unix timestamp value for a formatted date string
def get_unix_time(time_string, format='%Y-%m-%d %H:%M:%S'):
"""
Get the Unix timestamp value for a formatted date string
Parameters
----------
time_string: str
formatted time string to parse
format: str, default '%Y-%m-%d %H:%M:%S'
format for input time string
"""
try:
parsed_time = time.strptime(time_string.rstrip(), format)
except (TypeError, ValueError):
pass
else:
return calendar.timegm(parsed_time)
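#-- illustrative example: 2020-01-01 00:00:00 UTC corresponds to Unix time 1577836800
#--     get_unix_time('2020-01-01 00:00:00')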
#-- PURPOSE: rounds a number to an even number less than or equal to original
def even(value):
"""
Rounds a number to an even number less than or equal to original
Parameters
----------
value: float
number to be rounded
"""
return 2*int(value//2)
#-- PURPOSE: rounds a number upward to its nearest integer
def ceil(value):
"""
Rounds a number upward to its nearest integer
Parameters
----------
value: float
number to be rounded upward
"""
return -int(-value//1)
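#-- Examples (illustrative): even(5.7) returns 4 and even(-3.1) returns -4,
#-- while ceil(3.2) returns 4 and ceil(-3.2) returns -3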
#-- PURPOSE: make a copy of a file with all system information
def copy(source, destination, move=False, **kwargs):
"""
Copy or move a file with all system information
Parameters
----------
source: str
source file
destination: str
copied destination file
move: bool, default False
remove the source file
"""
source = os.path.abspath(os.path.expanduser(source))
destination = os.path.abspath(os.path.expanduser(destination))
#-- log source and destination
logging.info('{0} -->\n\t{1}'.format(source,destination))
shutil.copyfile(source, destination)
shutil.copystat(source, destination)
if move:
os.remove(source)
#-- PURPOSE: check ftp connection
def check_ftp_connection(HOST, username=None, password=None):
"""
Check internet connection with ftp host
Parameters
----------
HOST: str
remote ftp host
username: str or NoneType
ftp username
password: str or NoneType
ftp password
"""
#-- attempt to connect to ftp host
try:
f = ftplib.FTP(HOST)
f.login(username, password)
f.voidcmd("NOOP")
except IOError:
raise RuntimeError('Check internet connection')
except ftplib.error_perm:
raise RuntimeError('Check login credentials')
else:
return True
#-- PURPOSE: list a directory on a ftp host
def ftp_list(HOST, username=None, password=None, timeout=None,
basename=False, pattern=None, sort=False):
"""
List a directory on a ftp host
Parameters
----------
HOST: str or list
remote ftp host path split as list
username: str or NoneType
ftp username
password: str or NoneType
ftp password
timeout: int or NoneType, default None
timeout in seconds for blocking operations
basename: bool, default False
return the file or directory basename instead of the full path
pattern: str or NoneType, default None
regular expression pattern for reducing list
sort: bool, default False
sort output list
Returns
-------
output: list
items in a directory
mtimes: list
last modification times for items in the directory
"""
#-- verify inputs for remote ftp host
if isinstance(HOST, str):
HOST = url_split(HOST)
#-- try to connect to ftp host
try:
ftp = ftplib.FTP(HOST[0],timeout=timeout)
except (socket.gaierror,IOError):
raise RuntimeError('Unable to connect to {0}'.format(HOST[0]))
else:
ftp.login(username,password)
#-- list remote path
output = ftp.nlst(posixpath.join(*HOST[1:]))
#-- get last modified date of ftp files and convert into unix time
mtimes = [None]*len(output)
#-- iterate over each file in the list and get the modification time
for i,f in enumerate(output):
try:
#-- try sending modification time command
mdtm = ftp.sendcmd('MDTM {0}'.format(f))
except ftplib.error_perm:
#-- directories will return with an error
pass
else:
#-- convert the modification time into unix time
mtimes[i] = get_unix_time(mdtm[4:], format="%Y%m%d%H%M%S")
#-- reduce to basenames
if basename:
output = [posixpath.basename(i) for i in output]
#-- reduce using regular expression pattern
if pattern:
i = [i for i,f in enumerate(output) if re.search(pattern,f)]
#-- reduce list of listed items and last modified times
output = [output[indice] for indice in i]
mtimes = [mtimes[indice] for indice in i]
#-- sort the list
if sort:
i = [i for i,j in sorted(enumerate(output), key=lambda i: i[1])]
#-- sort list of listed items and last modified times
output = [output[indice] for indice in i]
mtimes = [mtimes[indice] for indice in i]
#-- close the ftp connection
ftp.close()
#-- return the list of items and last modified times
return (output, mtimes)
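#-- Example (illustrative; the host and pattern below are placeholders):
#-- files,mtimes = ftp_list('ftp://ftp.example.gov/path/to/data',
#--     pattern=r'\.gz$', sort=True)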
#-- PURPOSE: download a file from a ftp host
def from_ftp(HOST, username=None, password=None, timeout=None,
local=None, hash='', chunk=8192, verbose=False, fid=sys.stdout,
mode=0o775):
"""
Download a file from a ftp host
Parameters
----------
HOST: str or list
remote ftp host path
username: str or NoneType
ftp username
password: str or NoneType
ftp password
timeout: int or NoneType, default None
timeout in seconds for blocking operations
local: str or NoneType, default None
path to local file
hash: str, default ''
MD5 hash of local file
chunk: int, default 8192
chunk size for transfer encoding
verbose: bool, default False
print file transfer information
fid: obj, default sys.stdout
open file object to print if verbose
mode: oct, default 0o775
permissions mode of output local file
Returns
-------
remote_buffer: obj
BytesIO representation of file
"""
#-- create logger
loglevel = logging.INFO if verbose else logging.CRITICAL
logging.basicConfig(stream=fid, level=loglevel)
#-- verify inputs for remote ftp host
if isinstance(HOST, str):
HOST = url_split(HOST)
#-- try downloading from ftp
try:
#-- try to connect to ftp host
ftp = ftplib.FTP(HOST[0], timeout=timeout)
except (socket.gaierror,IOError):
raise RuntimeError('Unable to connect to {0}'.format(HOST[0]))
else:
ftp.login(username,password)
#-- remote path
ftp_remote_path = posixpath.join(*HOST[1:])
#-- copy remote file contents to bytesIO object
remote_buffer = io.BytesIO()
ftp.retrbinary('RETR {0}'.format(ftp_remote_path),
remote_buffer.write, blocksize=chunk)
remote_buffer.seek(0)
#-- save file basename with bytesIO object
remote_buffer.filename = HOST[-1]
#-- generate checksum hash for remote file
remote_hash = hashlib.md5(remote_buffer.getvalue()).hexdigest()
#-- get last modified date of remote file and convert into unix time
mdtm = ftp.sendcmd('MDTM {0}'.format(ftp_remote_path))
remote_mtime = get_unix_time(mdtm[4:], format="%Y%m%d%H%M%S")
#-- compare checksums
if local and (hash != remote_hash):
#-- convert to absolute path
local = os.path.abspath(local)
#-- create directory if non-existent
if not os.access(os.path.dirname(local), os.F_OK):
os.makedirs(os.path.dirname(local), mode)
#-- print file information
args = (posixpath.join(*HOST),local)
logging.info('{0} -->\n\t{1}'.format(*args))
#-- store bytes to file using chunked transfer encoding
remote_buffer.seek(0)
with open(os.path.expanduser(local), 'wb') as f:
shutil.copyfileobj(remote_buffer, f, chunk)
#-- change the permissions mode
os.chmod(local,mode)
#-- keep remote modification time of file and local access time
os.utime(local, (os.stat(local).st_atime, remote_mtime))
#-- close the ftp connection
ftp.close()
#-- return the bytesIO object
remote_buffer.seek(0)
return remote_buffer
#-- PURPOSE: check internet connection
def check_connection(HOST):
"""
Check internet connection with http host
Parameters
----------
HOST: str
remote http host
"""
#-- attempt to connect to http host
try:
urllib2.urlopen(HOST, timeout=20, context=ssl.SSLContext())
except urllib2.URLError:
raise RuntimeError('Check internet connection')
else:
return True
#-- PURPOSE: list a directory on an Apache http Server
def http_list(HOST, timeout=None, context=ssl.SSLContext(),
parser=lxml.etree.HTMLParser(), format='%Y-%m-%d %H:%M',
pattern='', sort=False):
"""
List a directory on an Apache http Server
Parameters
----------
HOST: str or list
remote http host path
timeout: int or NoneType, default None
timeout in seconds for blocking operations
context: obj, default ssl.SSLContext()
SSL context for url opener object
parser: obj, default lxml.etree.HTMLParser()
HTML parser for lxml
format: str, default '%Y-%m-%d %H:%M'
format for input time string
pattern: str, default ''
regular expression pattern for reducing list
sort: bool, default False
sort output list
Returns
-------
colnames: list
column names in a directory
collastmod: list
last modification times for items in the directory
"""
#-- verify inputs for remote http host
if isinstance(HOST, str):
HOST = url_split(HOST)
#-- try listing from http
try:
#-- Create and submit request.
request = urllib2.Request(posixpath.join(*HOST))
response = urllib2.urlopen(request, timeout=timeout, context=context)
except (urllib2.HTTPError, urllib2.URLError):
raise Exception('List error from {0}'.format(posixpath.join(*HOST)))
else:
#-- read and parse request for files (column names and modified times)
tree = lxml.etree.parse(response, parser)
colnames = tree.xpath('//tr/td[not(@*)]//a/@href')
#-- get the Unix timestamp value for a modification time
collastmod = [get_unix_time(i,format=format)
for i in tree.xpath('//tr/td[@align="right"][1]/text()')]
#-- reduce using regular expression pattern
if pattern:
i = [i for i,f in enumerate(colnames) if re.search(pattern, f)]
#-- reduce list of column names and last modified times
colnames = [colnames[indice] for indice in i]
collastmod = [collastmod[indice] for indice in i]
#-- sort the list
if sort:
i = [i for i,j in sorted(enumerate(colnames), key=lambda i: i[1])]
#-- sort list of column names and last modified times
colnames = [colnames[indice] for indice in i]
collastmod = [collastmod[indice] for indice in i]
#-- return the list of column names and last modified times
return (colnames, collastmod)
#-- PURPOSE: download a file from a http host
def from_http(HOST, timeout=None, context=ssl.SSLContext(),
local=None, hash='', chunk=16384, verbose=False, fid=sys.stdout,
mode=0o775):
"""
Download a file from a http host
Parameters
----------
HOST: str or list
remote http host path split as list
timeout: int or NoneType, default None
timeout in seconds for blocking operations
context: obj, default ssl.SSLContext()
SSL context for url opener object
local: str or NoneType, default None
path to local file
hash: str, default ''
MD5 hash of local file
chunk: int, default 16384
chunk size for transfer encoding
verbose: bool, default False
print file transfer information
fid: obj, default sys.stdout
open file object to print if verbose
mode: oct, default 0o775
permissions mode of output local file
Returns
-------
remote_buffer: obj
BytesIO representation of file
"""
#-- create logger
loglevel = logging.INFO if verbose else logging.CRITICAL
logging.basicConfig(stream=fid, level=loglevel)
#-- verify inputs for remote http host
if isinstance(HOST, str):
HOST = url_split(HOST)
#-- try downloading from http
try:
#-- Create and submit request.
request = urllib2.Request(posixpath.join(*HOST))
response = urllib2.urlopen(request, timeout=timeout, context=context)
except:
raise Exception('Download error from {0}'.format(posixpath.join(*HOST)))
else:
#-- copy remote file contents to bytesIO object
remote_buffer = io.BytesIO()
shutil.copyfileobj(response, remote_buffer, chunk)
remote_buffer.seek(0)
#-- save file basename with bytesIO object
remote_buffer.filename = HOST[-1]
#-- generate checksum hash for remote file
remote_hash = hashlib.md5(remote_buffer.getvalue()).hexdigest()
#-- compare checksums
if local and (hash != remote_hash):
#-- convert to absolute path
local = os.path.abspath(local)
#-- create directory if non-existent
if not os.access(os.path.dirname(local), os.F_OK):
os.makedirs(os.path.dirname(local), mode)
#-- print file information
args = (posixpath.join(*HOST),local)
logging.info('{0} -->\n\t{1}'.format(*args))
#-- store bytes to file using chunked transfer encoding
remote_buffer.seek(0)
with open(os.path.expanduser(local), 'wb') as f:
shutil.copyfileobj(remote_buffer, f, chunk)
#-- change the permissions mode
os.chmod(local,mode)
#-- return the bytesIO object
remote_buffer.seek(0)
return remote_buffer
# PURPOSE: attempt to build an opener with netrc
def attempt_login(urs, context=ssl.SSLContext(),
password_manager=True, get_ca_certs=False, redirect=False,
authorization_header=False, **kwargs):
"""
attempt to build a urllib opener for NASA Earthdata
Parameters
----------
urs: str
Earthdata login URS 3 host
context: obj, default ssl.SSLContext()
SSL context for url opener object
password_manager: bool, default True
Create password manager context using default realm
get_ca_certs: bool, default False
Get list of loaded “certification authority” certificates
redirect: bool, default False
Create redirect handler object
authorization_header: bool, default False
Add base64 encoded authorization header to opener
username: str, default from environmental variable
NASA Earthdata username
password: str, default from environmental variable
NASA Earthdata password
retries: int, default 5
number of retry attempts
netrc: str, default ~/.netrc
path to .netrc file for authentication
Returns
-------
opener: obj
OpenerDirector instance
"""
# set default keyword arguments
kwargs.setdefault('username', os.environ.get('EARTHDATA_USERNAME'))
kwargs.setdefault('password', os.environ.get('EARTHDATA_PASSWORD'))
kwargs.setdefault('retries', 5)
kwargs.setdefault('netrc', os.path.expanduser('~/.netrc'))
try:
# only necessary on jupyterhub
os.chmod(kwargs['netrc'], 0o600)
# try retrieving credentials from netrc
username, _, password = netrc.netrc(kwargs['netrc']).authenticators(urs)
except Exception as e:
# try retrieving credentials from environmental variables
username, password = (kwargs['username'], kwargs['password'])
pass
# if username or password are not available
if not username:
username = builtins.input('Username for {0}: '.format(urs))
if not password:
prompt = 'Password for {0}@{1}: '.format(username, urs)
password = getpass.getpass(prompt=prompt)
# for each retry
for retry in range(kwargs['retries']):
# build an opener for urs with credentials
opener = build_opener(username, password,
context=context,
password_manager=password_manager,
get_ca_certs=get_ca_certs,
redirect=redirect,
authorization_header=authorization_header,
urs=urs)
        # try logging in by checking credentials
try:
check_credentials()
except Exception as e:
pass
else:
return opener
# reattempt login
        username = builtins.input('Username for {0}: '.format(urs))
        prompt = 'Password for {0}@{1}: '.format(username, urs)
        password = getpass.getpass(prompt=prompt)
# reached end of available retries
raise RuntimeError('End of Retries: Check NASA Earthdata credentials')
#-- PURPOSE: "login" to NASA Earthdata with supplied credentials
def build_opener(username, password, context=ssl.SSLContext(ssl.PROTOCOL_TLS),
password_manager=True, get_ca_certs=True, redirect=True,
authorization_header=False, urs='https://urs.earthdata.nasa.gov'):
"""
build urllib opener for NASA Earthdata with supplied credentials
Parameters
----------
username: str or NoneType, default None
NASA Earthdata username
password: str or NoneType, default None
NASA Earthdata password
context: obj, default ssl.SSLContext()
SSL context for url opener object
password_manager: bool, default True
Create password manager context using default realm
get_ca_certs: bool, default True
Get list of loaded “certification authority” certificates
redirect: bool, default True
Create redirect handler object
authorization_header: bool, default False
Add base64 encoded authorization header to opener
urs: str, default 'https://urs.earthdata.nasa.gov'
Earthdata login URS 3 host
"""
#-- https://docs.python.org/3/howto/urllib2.html#id5
handler = []
#-- create a password manager
if password_manager:
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
#-- Add the username and password for NASA Earthdata Login system
password_mgr.add_password(None, urs, username, password)
handler.append(urllib2.HTTPBasicAuthHandler(password_mgr))
#-- Create cookie jar for storing cookies. This is used to store and return
#-- the session cookie given to use by the data server (otherwise will just
#-- keep sending us back to Earthdata Login to authenticate).
cookie_jar = CookieJar()
handler.append(urllib2.HTTPCookieProcessor(cookie_jar))
#-- SSL context handler
if get_ca_certs:
context.get_ca_certs()
handler.append(urllib2.HTTPSHandler(context=context))
#-- redirect handler
if redirect:
handler.append(urllib2.HTTPRedirectHandler())
#-- create "opener" (OpenerDirector instance)
opener = urllib2.build_opener(*handler)
#-- Encode username/password for request authorization headers
#-- add Authorization header to opener
if authorization_header:
b64 = base64.b64encode('{0}:{1}'.format(username, password).encode())
opener.addheaders = [("Authorization","Basic {0}".format(b64.decode()))]
#-- Now all calls to urllib2.urlopen use our opener.
urllib2.install_opener(opener)
#-- All calls to urllib2.urlopen will now use handler
#-- Make sure not to include the protocol in with the URL, or
#-- HTTPPasswordMgrWithDefaultRealm will be confused.
return opener
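#-- Example (illustrative): because the opener is installed globally, later
#-- urllib2 requests are authenticated with the supplied Earthdata credentials
#-- build_opener(username, password)
#-- check_credentials()
#-- response = urllib2.urlopen('https://n5eil01u.ecs.nsidc.org/ICEBRIDGE/')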
#-- PURPOSE: check that entered NASA Earthdata credentials are valid
def check_credentials():
"""
Check that entered NASA Earthdata credentials are valid
"""
try:
remote_path = posixpath.join('https://cddis.nasa.gov','archive')
request = urllib2.Request(url=remote_path)
response = urllib2.urlopen(request, timeout=20)
except urllib2.HTTPError:
raise RuntimeError('Check your NASA Earthdata credentials')
except urllib2.URLError:
raise RuntimeError('Check internet connection')
else:
return True
#-- PURPOSE: list a directory on GSFC CDDIS https server
def cddis_list(HOST, username=None, password=None, build=True,
timeout=None, parser=lxml.etree.HTMLParser(), pattern='',
sort=False):
"""
List a directory on GSFC CDDIS archive server
Parameters
----------
HOST: str or list
remote https host
username: str or NoneType, default None
NASA Earthdata username
password: str or NoneType, default None
NASA Earthdata password
build: bool, default True
Build opener and check Earthdata credentials
timeout: int or NoneType, default None
timeout in seconds for blocking operations
parser: obj, default lxml.etree.HTMLParser()
HTML parser for lxml
pattern: str, default ''
regular expression pattern for reducing list
sort: bool, default False
sort output list
Returns
-------
colnames: list
column names in a directory
collastmod: list
last modification times for items in the directory
"""
#-- use netrc credentials
if build and not (username or password):
urs = 'urs.earthdata.nasa.gov'
username,_,password = netrc.netrc().authenticators(urs)
#-- build urllib2 opener and check credentials
if build:
#-- build urllib2 opener with credentials
build_opener(username, password)
#-- check credentials
check_credentials()
#-- verify inputs for remote https host
if isinstance(HOST, str):
HOST = url_split(HOST)
#-- Encode username/password for request authorization headers
base64_string = base64.b64encode('{0}:{1}'.format(username, password).encode())
authorization_header = "Basic {0}".format(base64_string.decode())
#-- try listing from https
try:
#-- Create and submit request.
request = urllib2.Request(posixpath.join(*HOST))
request.add_header("Authorization", authorization_header)
tree = lxml.etree.parse(urllib2.urlopen(request, timeout=timeout), parser)
except:
raise Exception('List error from {0}'.format(posixpath.join(*HOST)))
else:
#-- read and parse request for files (column names and modified times)
#-- find directories
colnames = tree.xpath('//div[@class="archiveDir"]/div/a/text()')
collastmod = [None]*(len(colnames))
#-- find files
colnames.extend(tree.xpath('//div[@class="archiveItem"]/div/a/text()'))
#-- get the Unix timestamp value for a modification time
collastmod.extend([get_unix_time(i[:19], format='%Y:%m:%d %H:%M:%S')
for i in tree.xpath('//div[@class="archiveItem"]/div/span/text()')])
#-- reduce using regular expression pattern
if pattern:
i = [i for i,f in enumerate(colnames) if re.search(pattern, f)]
#-- reduce list of column names and last modified times
colnames = [colnames[indice] for indice in i]
collastmod = [collastmod[indice] for indice in i]
#-- sort the list
if sort:
i = [i for i,j in sorted(enumerate(colnames), key=lambda i: i[1])]
#-- sort list of column names and last modified times
colnames = [colnames[indice] for indice in i]
collastmod = [collastmod[indice] for indice in i]
#-- return the list of column names and last modified times
return (colnames, collastmod)
#-- PURPOSE: download a file from a GSFC CDDIS https server
def from_cddis(HOST, username=None, password=None, build=True,
timeout=None, local=None, hash='', chunk=16384, verbose=False,
fid=sys.stdout, mode=0o775):
"""
Download a file from GSFC CDDIS archive server
Parameters
----------
HOST: str or list
remote https host
username: str or NoneType, default None
NASA Earthdata username
password: str or NoneType, default None
NASA Earthdata password
build: bool, default True
Build opener and check Earthdata credentials
timeout: int or NoneType, default None
timeout in seconds for blocking operations
local: str or NoneType, default None
path to local file
hash: str, default ''
MD5 hash of local file
chunk: int, default 16384
chunk size for transfer encoding
verbose: bool, default False
print file transfer information
fid: obj, default sys.stdout
open file object to print if verbose
mode: oct, default 0o775
permissions mode of output local file
Returns
-------
remote_buffer: obj
BytesIO representation of file
"""
#-- create logger
loglevel = logging.INFO if verbose else logging.CRITICAL
logging.basicConfig(stream=fid, level=loglevel)
#-- use netrc credentials
if build and not (username or password):
urs = 'urs.earthdata.nasa.gov'
username,_,password = netrc.netrc().authenticators(urs)
#-- build urllib2 opener and check credentials
if build:
#-- build urllib2 opener with credentials
build_opener(username, password)
#-- check credentials
check_credentials()
#-- verify inputs for remote https host
if isinstance(HOST, str):
HOST = url_split(HOST)
#-- Encode username/password for request authorization headers
base64_string = base64.b64encode('{0}:{1}'.format(username, password).encode())
authorization_header = "Basic {0}".format(base64_string.decode())
#-- try downloading from https
try:
#-- Create and submit request.
request = urllib2.Request(posixpath.join(*HOST))
request.add_header("Authorization", authorization_header)
response = urllib2.urlopen(request, timeout=timeout)
except:
raise Exception('Download error from {0}'.format(posixpath.join(*HOST)))
else:
#-- copy remote file contents to bytesIO object
remote_buffer = io.BytesIO()
shutil.copyfileobj(response, remote_buffer, chunk)
remote_buffer.seek(0)
#-- save file basename with bytesIO object
remote_buffer.filename = HOST[-1]
#-- generate checksum hash for remote file
remote_hash = hashlib.md5(remote_buffer.getvalue()).hexdigest()
#-- compare checksums
if local and (hash != remote_hash):
#-- convert to absolute path
local = os.path.abspath(local)
#-- create directory if non-existent
if not os.access(os.path.dirname(local), os.F_OK):
os.makedirs(os.path.dirname(local), mode)
#-- print file information
args = (posixpath.join(*HOST),local)
logging.info('{0} -->\n\t{1}'.format(*args))
#-- store bytes to file using chunked transfer encoding
remote_buffer.seek(0)
with open(os.path.expanduser(local), 'wb') as f:
shutil.copyfileobj(remote_buffer, f, chunk)
#-- change the permissions mode
os.chmod(local,mode)
#-- return the bytesIO object
remote_buffer.seek(0)
return remote_buffer | ATM1b-QFIT | /ATM1b-QFIT-1.0.1.tar.gz/ATM1b-QFIT-1.0.1/ATM1b_QFIT/utilities.py | utilities.py |
u"""
nsidc_convert_ILATM1b.py
Written by Tyler Sutterley (02/2020)
Reads IceBridge ATM QFIT binary files directly from the
National Snow and Ice Data Center (NSIDC) and outputs as HDF5 files
http://nsidc.org/data/docs/daac/icebridge/ilatm1b/docs/ReadMe.qfit.txt
https://wiki.earthdata.nasa.gov/display/EL/How+To+Access+Data+With+Python
https://nsidc.org/support/faq/what-options-are-available-bulk-downloading-data-
https-earthdata-login-enabled
http://www.voidspace.org.uk/python/articles/authentication.shtml#base64
Register with NASA Earthdata Login system:
https://urs.earthdata.nasa.gov
Add NSIDC_DATAPOOL_OPS to NASA Earthdata Applications
https://urs.earthdata.nasa.gov/oauth/authorize?client_id=_JLuwMHxb2xX6NwYTb4dRA
CALLING SEQUENCE:
python nsidc_convert_ILATM1b.py --user=<username> ILATM1B BLATM1B
where <username> is your NASA Earthdata username
INPUTS:
ILATM1B: Airborne Topographic Mapper QFIT Elevation
BLATM1B: Pre-Icebridge Airborne Topographic Mapper QFIT Elevation
ILNSA1B: Narrow Swath Airborne Topographic Mapper QFIT Elevation
COMMAND LINE OPTIONS:
--help: list the command line options
-U X, --user X: username for NASA Earthdata Login
-W X, --password X: Password for NASA Earthdata Login
-N X, --netrc X: path to .netrc file for alternative authentication
-D X, --directory X: working data directory
-Y X, --year X: years to sync
-S X, --subdirectory X: specific subdirectories to sync
-V, --verbose: Verbose output of files synced
-T X, --timeout X: Timeout in seconds for blocking operations
-R X, --retry X: Connection retry attempts
-C, --clobber: Overwrite existing data in transfer
-M X, --mode X: Local permissions mode of the directories and files synced
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
    h5py: Python interface for Hierarchical Data Format 5 (HDF5)
https://www.h5py.org/
lxml: Pythonic XML and HTML processing library using libxml2/libxslt
https://lxml.de/
https://github.com/lxml/lxml
future: Compatibility layer between Python 2 and Python 3
http://python-future.org/
PROGRAM DEPENDENCIES:
time.py: utilities for calculating time operations
utilities.py: download and management utilities for syncing files
UPDATE HISTORY:
Updated 06/2022: using argparse to set command line parameters
added options for number of retries and timeout
using logging for verbose output
Updated 02/2020: using python3 compatible division for calculating counts
Updated 01/2020: updated regular expression operator for extracting dates
Updated 09/2019: added ssl context to urlopen headers
Updated 06/2019: use strptime to extract last modified time of remote files
Updated 12/2018: decode authorization header for python3 compatibility
Updated 11/2018: encode base64 strings for python3 compatibility
Updated 10/2018: updated GPS time calculation for calculating leap seconds
Updated 07/2018 for public release
"""
from __future__ import print_function, division
import sys
import os
import re
import h5py
import shutil
import logging
import argparse
import posixpath
import lxml.etree
import numpy as np
import calendar, time
import ATM1b_QFIT.time
import ATM1b_QFIT.utilities
#-- PURPOSE: sync the Icebridge Level-1b ATM QFIT elevation data from NSIDC
def nsidc_convert_ILATM1b(DIRECTORY, PRODUCTS, YEARS=None, SUBDIRECTORY=None,
TIMEOUT=None, RETRY=1, CLOBBER=False, MODE=0o775):
#-- Airborne Topographic Mapper Product (Level-1b)
#-- remote directories for each dataset on NSIDC server
remote_directories = {}
#-- regular expression for file prefixes of each product
remote_regex_pattern = {}
#-- Airborne Topographic Mapper QFIT Elevation (Level-1b)
remote_directories['ILATM1B'] = ["ICEBRIDGE","ILATM1B.001"]
remote_directories['BLATM1B'] = ["PRE_OIB","BLATM1B.001"]
remote_regex_pattern['ILATM1B'] = '(ILATM1B)'
remote_regex_pattern['BLATM1B'] = '(BLATM1B)'
#-- Narrow Swath Airborne Topographic Mapper QFIT Elevation (Level-1b)
remote_directories['ILNSA1B'] = ["ICEBRIDGE","ILNSA1B.001"]
remote_regex_pattern['ILNSA1B'] = '(ILNSA1B)'
#-- compile HTML parser for lxml
parser = lxml.etree.HTMLParser()
#-- remote https server for Icebridge Data
HOST = 'https://n5eil01u.ecs.nsidc.org'
#-- regular expression operator for finding icebridge-style subdirectories
if SUBDIRECTORY:
#-- Sync particular subdirectories for product
R2 = re.compile('('+'|'.join(SUBDIRECTORY)+')', re.VERBOSE)
elif YEARS:
#-- Sync particular years for product
regex_pattern = '|'.join('{0:d}'.format(y) for y in YEARS)
        R2 = re.compile(r'({0})\.(\d+)\.(\d+)'.format(regex_pattern), re.VERBOSE)
else:
#-- Sync all available years for product
        R2 = re.compile(r'(\d+)\.(\d+)\.(\d+)', re.VERBOSE)
#-- for each ATM product listed
for p in PRODUCTS:
logging.info('PRODUCT={0}'.format(p))
#-- get subdirectories from remote directory
d=posixpath.join(HOST,remote_directories[p][0],remote_directories[p][1])
req = ATM1b_QFIT.utilities.urllib2.Request(url=d)
#-- read and parse request for subdirectories (find column names)
tree = lxml.etree.parse(ATM1b_QFIT.utilities.urllib2.urlopen(req), parser)
colnames = tree.xpath('//td[@class="indexcolname"]//a/@href')
remote_sub = [sd for sd in colnames if R2.match(sd)]
#-- for each remote subdirectory
for sd in remote_sub:
#-- check if data directory exists and recursively create if not
local_dir = os.path.join(DIRECTORY,sd)
os.makedirs(local_dir,MODE) if not os.path.exists(local_dir) else None
#-- find Icebridge data files
req = ATM1b_QFIT.utilities.urllib2.Request(url=posixpath.join(d,sd))
#-- read and parse request for remote files (columns and dates)
tree = lxml.etree.parse(ATM1b_QFIT.utilities.urllib2.urlopen(req), parser)
colnames = tree.xpath('//td[@class="indexcolname"]//a/@href')
collastmod = tree.xpath('//td[@class="indexcollastmod"]/text()')
remote_file_lines = [i for i,f in enumerate(colnames) if
re.match(remote_regex_pattern[p],f)]
#-- sync each Icebridge data file
for i in remote_file_lines:
#-- remote and local versions of the file
remote_file = posixpath.join(d,sd,colnames[i])
local_file = os.path.join(local_dir,colnames[i])
#-- get last modified date and convert into unix time
remote_mtime = ATM1b_QFIT.utilities.get_unix_time(collastmod[i],
format='%Y-%m-%d %H:%M')
#-- sync Icebridge files with NSIDC server
http_pull_file(remote_file, remote_mtime, local_file,
TIMEOUT=TIMEOUT, RETRY=RETRY, CLOBBER=CLOBBER, MODE=MODE)
#-- close request
req = None
#-- PURPOSE: pull file from a remote host checking if file exists locally
#-- and if the remote file is newer than the local file
#-- read the input file and output as HDF5
def http_pull_file(remote_file, remote_mtime, local_file,
TIMEOUT=None, RETRY=1, CLOBBER=False, MODE=0o775):
#-- split extension from input ATM data file
fileBasename, fileExtension = os.path.splitext(local_file)
#-- copy Level-2 file from server into new HDF5 file
if (fileExtension == '.qi'):
local_file = '{0}.h5'.format(fileBasename)
#-- if file exists in file system: check if remote file is newer
TEST = False
OVERWRITE = ' (clobber)'
#-- check if local version of file exists
if os.access(local_file, os.F_OK):
#-- check last modification time of local file
local_mtime = os.stat(local_file).st_mtime
#-- if remote file is newer: overwrite the local file
if (remote_mtime > local_mtime):
TEST = True
OVERWRITE = ' (overwrite)'
else:
TEST = True
OVERWRITE = ' (new)'
#-- if file does not exist locally, is to be overwritten, or CLOBBER is set
if TEST or CLOBBER:
#-- Printing files transferred
logging.info('{0} --> '.format(remote_file))
logging.info('\t{0}{1}\n'.format(local_file,OVERWRITE))
#-- Create and submit request. There are a wide range of exceptions
#-- that can be thrown here, including HTTPError and URLError.
#-- chunked transfer encoding size
CHUNK = 16 * 1024
#-- attempt to download up to the number of retries
retry_counter = 0
while (retry_counter < RETRY):
#-- attempt to retrieve file from https server
try:
#-- Create and submit request
#-- There are a range of exceptions that can be thrown
#-- including HTTPError and URLError.
fid = ATM1b_QFIT.utilities.from_http(remote_file,
timeout=TIMEOUT, context=None,
chunk=CHUNK)
            except Exception:
                pass
            else:
                break
            #-- add to retry counter
            retry_counter += 1
        #-- if all retry attempts failed: raise an exception
        if (retry_counter == RETRY):
            raise RuntimeError('Download error from {0}'.format(remote_file))
#-- Download xml files using shutil chunked transfer encoding
if (fileExtension == '.xml'):
#-- copy contents to local file using chunked transfer encoding
#-- transfer should work properly with ascii and binary data formats
with open(local_file, 'wb') as f:
shutil.copyfileobj(fid, f, CHUNK)
else:
#-- read input data
ATM_L1b_input, ATM_L1b_header = read_ATM_QFIT_file(fid)
HDF5_icebridge_ATM1b(ATM_L1b_input, FILENAME=local_file,
INPUT_FILE=remote_file, HEADER=ATM_L1b_header)
#-- keep remote modification time of file and local access time
os.utime(local_file, (os.stat(local_file).st_atime, remote_mtime))
os.chmod(local_file, MODE)
#-- PURPOSE: read the ATM Level-1b data file
def read_ATM_QFIT_file(fid):
#-- get the number of variables and the endianness of the file
file_info = fid.getbuffer().nbytes
n_blocks,dtype = get_record_length(fid)
MAXARG = 14
#-- check that the number of blocks per record is less than MAXARG
if (n_blocks > MAXARG):
raise Exception('ERROR: Unexpected number of variables')
#-- read over header text
header_count,header_text = read_ATM1b_QFIT_header(fid, n_blocks, dtype)
#-- number of records within file
n_records = (file_info - header_count)//n_blocks//dtype.itemsize
#-- read input data
ATM_L1b_input = read_ATM1b_QFIT_records(fid, n_blocks, n_records, dtype)
#-- close the input file
fid.close()
#-- return the data
return ATM_L1b_input, header_text
#-- PURPOSE: get the record length and endianness of the input QFIT file
def get_record_length(fid):
#-- assume initially big endian (all input data 32-bit integers)
dtype = np.dtype('>i4')
value, = np.frombuffer(fid.read(dtype.itemsize), dtype=dtype, count=1)
fid.seek(0)
#-- swap to little endian and reread first line
if (value > 100):
dtype = np.dtype('<i4')
value, = np.frombuffer(fid.read(dtype.itemsize), dtype=dtype, count=1)
fid.seek(0)
#-- get the number of variables
n_blocks = value//dtype.itemsize
#-- read past first record
np.frombuffer(fid.read(n_blocks*dtype.itemsize), dtype=dtype, count=n_blocks)
#-- return the number of variables and the endianness
return (n_blocks, dtype)
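#-- Example (illustrative): a 12-word QFIT record is 48 bytes, so the leading
#-- 4-byte integer of the first record reads 48 and n_blocks = 48//4 = 12;
#-- a first value much greater than 100 indicates the opposite byte order and
#-- the record is reread as little endian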
#-- PURPOSE: get length and text of ATM1b file headers
def read_ATM1b_QFIT_header(fid, n_blocks, dtype):
header_count = 0
header_text = b''
value = np.full((n_blocks), -1, dtype=np.int32)
while (value[0] < 0):
#-- read past first record
line = fid.read(n_blocks*dtype.itemsize)
value = np.frombuffer(line, dtype=dtype, count=n_blocks)
header_text += bytes(line[dtype.itemsize:])
header_count += dtype.itemsize*n_blocks
#-- rewind file to previous record
fid.seek(header_count)
#-- remove last record from header text
header_text = header_text[:-dtype.itemsize*n_blocks]
#-- replace empty byte strings and whitespace
header_text = header_text.replace(b'\x00',b'').rstrip()
#-- decode header
return header_count, header_text.decode('utf-8')
#-- PURPOSE: read ATM L1b variables from a QFIT binary file
def read_ATM1b_QFIT_records(fid,n_blocks,n_records,dtype):
#-- 10 word format = 0
#-- 12 word format = 1
#-- 14 word format = 2
w = (n_blocks-10)//2
#-- scaling factors for each variable for the 3 word formats (14 max)
scaling_table = [
[1e3, 1e6, 1e6, 1e3, 1, 1, 1e3, 1e3, 1e3, 1e3],
[1e3, 1e6, 1e6, 1e3, 1, 1, 1e3, 1e3, 1e3, 1.0e1, 1, 1e3],
[1e3, 1e6, 1e6, 1e3, 1, 1, 1e3, 1e3, 1e3, 1, 1e6, 1e6, 1e3, 1e3]]
#-- input variable names for the 3 word formats (14 max)
variable_table = []
#-- 10 word format
variable_table.append(['rel_time','latitude','longitude','elevation',
'xmt_sigstr','rcv_sigstr','azimuth','pitch','roll','time_hhmmss'])
#-- 12 word format
variable_table.append(['rel_time','latitude','longitude','elevation',
'xmt_sigstr','rcv_sigstr','azimuth','pitch','roll',
'gps_pdop','pulse_width','time_hhmmss'])
#-- 14 word format
variable_table.append(['rel_time','latitude','longitude','elevation',
'xmt_sigstr','rcv_sigstr','azimuth','pitch','roll','passive_sig',
'pass_foot_lat','pass_foot_long','pass_foot_synth_elev','time_hhmmss'])
#-- input variable data types for the 3 word formats (14 max)
dtype_table = []
#-- 10 word format
dtype_table.append(['f','f','f','f','i','i','f','f','f','f'])
#-- 12 word format
dtype_table.append(['f','f','f','f','i','i','f','f','f','f','i','f'])
#-- 14 word format
dtype_table.append(['f','f','f','f','i','i','f','f','f','i','f','f','f','f'])
#-- dictionary with output variables
ATM_L1b_input = {}
for n,d in zip(variable_table[w],dtype_table[w]):
ATM_L1b_input[n] = np.zeros((n_records), dtype=np.dtype(d))
#-- for each record in the ATM Level-1b file
for r in range(n_records):
#-- input data record r
i = np.frombuffer(fid.read(n_blocks*dtype.itemsize),
dtype=dtype, count=n_blocks)
#-- read variable and scale to output format
for v,n,d,s in zip(i,variable_table[w],dtype_table[w],scaling_table[w]):
ATM_L1b_input[n][r] = v.astype(d)/s
#-- return the input data dictionary
return ATM_L1b_input
#-- PURPOSE: calculate the number of leap seconds between GPS time (seconds
#-- since Jan 6, 1980 00:00:00) and UTC
def calc_GPS_to_UTC(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND):
GPS_Time = ATM1b_QFIT.time.convert_calendar_dates(
YEAR, MONTH, DAY, HOUR, MINUTE, SECOND,
epoch=(1980,1,6,0,0,0), scale=1.0)
return ATM1b_QFIT.time.count_leap_seconds(GPS_Time)
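#-- Example (illustrative): GPS time runs ahead of UTC by the accumulated leap
#-- seconds since 1980-01-06, so for a 2018 flight date this function should
#-- return 18, which is subtracted from the packed GPS times below to report
#-- UTC start and end times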
#-- PURPOSE: output HDF5 file with geolocated elevation surfaces calculated
#-- from LVIS Level-1b waveform products
def HDF5_icebridge_ATM1b(ILATM1b_MDS,FILENAME=None,INPUT_FILE=None,HEADER=''):
#-- open output HDF5 file
fileID = h5py.File(FILENAME, 'w')
#-- create sub-groups within HDF5 file
fileID.create_group('instrument_parameters')
#-- Dimensions of parameters
n_records, = ILATM1b_MDS['elevation'].shape
#-- regular expression pattern for extracting parameters
    rx = re.compile(r'(BLATM1B|ILATM1B|ILNSA1B)_((\d{4})|(\d{2}))(\d{2})(\d{2})'
        r'(.*?)\.qi$', re.VERBOSE)
#-- extract mission and other parameters from filename
match_object = rx.match(os.path.basename(INPUT_FILE))
MISSION = match_object.group(1)
#-- convert year, month and day to int variables
year = np.int64(match_object.group(2))
month = np.int64(match_object.group(5))
day = np.int64(match_object.group(6))
    #-- early date strings omitted century and millennia (e.g. 93 for 1993)
if match_object.group(4):
year = (year + 1900) if (year >= 90) else (year + 2000)
#-- extract end time from time_hhmmss variable
hour = np.zeros((2)); minute = np.zeros((2)); second = np.zeros((2))
for i,ind in enumerate([0,-1]):
#-- convert to zero-padded string with 3 decimal points
line_contents = '{0:010.3f}'.format(ILATM1b_MDS['time_hhmmss'][ind])
hour[i] = np.float64(line_contents[:2])
minute[i] = np.float64(line_contents[2:4])
second[i] = np.float64(line_contents[4:])
#-- Defining output HDF5 variable attributes
#-- Latitude
attributes = {}
attributes['latitude'] = {}
attributes['latitude']['long_name'] = 'latitude'
attributes['latitude']['units'] = 'degrees_north'
attributes['latitude']['description'] = 'Laser Spot Latitude'
#-- Longitude
attributes['longitude'] = {}
attributes['longitude']['long_name'] = 'Longitude'
attributes['longitude']['units'] = 'degrees_east'
attributes['longitude']['description'] = 'Laser Spot East Longitude'
#-- Elevation
attributes['elevation'] = {}
attributes['elevation']['long_name'] = 'Elevation'
attributes['elevation']['units'] = 'meters'
attributes['elevation']['description'] = ('Elevation of the laser '
'spot above ellipsoid')
#-- Relative Time
attributes['rel_time'] = {}
attributes['rel_time']['long_name'] = 'Transmit time of each shot'
attributes['rel_time']['units'] = 'seconds'
attributes['rel_time']['description'] = ('Relative Time measured from'
' start of file')
#-- time_hhmmss
attributes['time_hhmmss'] = {}
attributes['time_hhmmss']['long_name'] = 'Packed Time'
attributes['time_hhmmss']['description'] = ('GPS time packed, example: '
'153320.100 = 15 hours 33 minutes 20.1 seconds.')
#-- azimuth
attributes['azimuth'] = {}
attributes['azimuth']['long_name'] = 'Scan Azimuth'
attributes['azimuth']['units'] = 'degrees'
attributes['azimuth']['description'] = ('Position of the rotating ATM '
'scanner mirror.')
attributes['azimuth']['valid_min'] = 0.0
attributes['azimuth']['valid_max'] = 360.0
#-- pitch
attributes['pitch'] = {}
attributes['pitch']['long_name'] = 'Pitch'
attributes['pitch']['units'] = 'degrees'
attributes['pitch']['description'] = 'Pitch component of aircraft attitude.'
#-- roll
attributes['roll'] = {}
attributes['roll']['long_name'] = 'Roll'
attributes['roll']['units'] = 'degrees'
attributes['roll']['description'] = 'Roll component of aircraft attitude.'
#-- gps_pdop
attributes['gps_pdop'] = {}
attributes['gps_pdop']['long_name'] = 'GPS Dilution of Precision'
attributes['gps_pdop']['description'] = 'GPS Dilution of Precision (PDOP)'
#-- pulse_width
attributes['pulse_width'] = {}
attributes['pulse_width']['long_name'] = 'Pulse Width'
attributes['pulse_width']['units'] = 'counts'
attributes['pulse_width']['description'] = ('Laser received pulse width at '
'half height, number of digitizer samples at 0.5 nanosecond per sample.')
#-- xmt_sigstr
attributes['xmt_sigstr'] = {}
attributes['xmt_sigstr']['long_name'] = 'Start Pulse Signal Strength'
attributes['xmt_sigstr']['description'] = ('Transmitted Pulse Signal Strength'
' (relative). The sum of waveform digitizer samples within the laser '
'pulse sampled at the laser output. Units are in digitizer counts.')
#-- rcv_sigstr
attributes['rcv_sigstr'] = {}
attributes['rcv_sigstr']['long_name'] = 'Rcvd Signal Strength'
attributes['rcv_sigstr']['description'] = ('Received Laser Signal Strength '
'(relative). This is the sum taken over the received pulse of the '
'waveform samples in units of digitizer counts.')
#-- 14 word count variables
#-- Between 1997 and 2004 some ATM surveys included a separate sensor to
#-- measure passive brightness. The passive data is not calibrated and
#-- its use, if any, should be qualitative in nature. It may aid
#-- the interpretation of terrain features. The measurement capability
#-- was engineered into the ATM sensors to aid in the identification
#-- of the water/beach interface acquired with the instrument in
#-- coastal mapping applications.
attributes['passive_sig'] = {}
attributes['passive_sig']['long_name'] = 'Passive Signal (relative)'
attributes['passive_sig']['description'] = ("Measure of radiance reflected "
"from the earth's surface within the vicinity of the laser pulse")
attributes['pass_foot_lat'] = {}
attributes['pass_foot_lat']['long_name'] = 'Passive Footprint Latitude'
attributes['pass_foot_lat']['units'] = 'degrees'
attributes['pass_foot_long'] = {}
attributes['pass_foot_long']['long_name'] = 'Passive Footprint Longitude'
attributes['pass_foot_long']['units'] = 'degrees'
attributes['pass_foot_synth_elev'] = {}
attributes['pass_foot_synth_elev']['long_name'] = ('Passive Footprint '
'Synthesized Elevation')
attributes['pass_foot_synth_elev']['units'] = 'meters'
#-- Defining the HDF5 dataset variables
h5 = {}
#-- Defining data variables
for key in ['elevation','longitude','latitude']:
val = ILATM1b_MDS[key]
h5[key] = fileID.create_dataset(key, (n_records,),
data=val, dtype=val.dtype, compression='gzip')
#-- add HDF5 variable attributes
for att_name,att_val in attributes[key].items():
h5[key].attrs[att_name] = att_val
#-- instrument parameter variables
instrument_parameters = ['rel_time','xmt_sigstr','rcv_sigstr','azimuth',
'pitch','roll','gps_pdop','pulse_width','passive_sig','pass_foot_lat',
'pass_foot_long','pass_foot_synth_elev','time_hhmmss']
for key in [p for p in instrument_parameters if p in ILATM1b_MDS.keys()]:
val = ILATM1b_MDS[key]
h5[key] = fileID.create_dataset('instrument_parameters/{0}'.format(key),
(n_records,), data=val, dtype=val.dtype, compression='gzip')
#-- add HDF5 variable attributes
for att_name,att_val in attributes[key].items():
h5[key].attrs[att_name] = att_val
#-- Defining global attributes for output HDF5 file
fileID.attrs['featureType'] = 'trajectory'
fileID.attrs['title'] = 'ATM Qfit Elevation and Return Strength'
fileID.attrs['short_name'] = "L1B_QFIT"
fileID.attrs['comment'] = ('Operation IceBridge products may include test '
'flight data that are not useful for research and scientific analysis. '
'Test flights usually occur at the beginning of campaigns. Users '
'should read flight reports for the flights that collected any of the '
'data they intend to use')
fileID.attrs['summary'] = ("This data set contains spot elevation "
"measurements of Arctic and Antarctic sea ice, and Greenland, other "
"Arctic and Antarctic terrestrial ice surface acquired using the NASA "
"Airborne Topographic Mapper (ATM) instrumentation. The data were "
"collected as part of NASA Operation IceBridge funded campaigns.")
nsidc_reference = {}
nsidc_reference['ILATM1B'] = 'http://nsidc.org/data/ilatm1b/versions/1'
nsidc_reference['BLATM1B'] = 'http://nsidc.org/data/BLATM1B'
nsidc_reference['ILNSA1B'] = 'http://nsidc.org/data/ilnsa1b/versions/1'
fileID.attrs['references'] = '{0}, {1}'.format('http://atm.wff.nasa.gov/',
nsidc_reference[MISSION])
fileID.attrs['date_created'] = time.strftime('%Y-%m-%d',time.localtime())
fileID.attrs['project'] = 'NASA Operation IceBridge'
fileID.attrs['instrument'] = 'Airborne Topographic Mapper (ATM)'
fileID.attrs['processing_level'] = '1b'
fileID.attrs['elevation_file'] = INPUT_FILE
fileID.attrs['geospatial_lat_min'] = ILATM1b_MDS['latitude'].min()
fileID.attrs['geospatial_lat_max'] = ILATM1b_MDS['latitude'].max()
fileID.attrs['geospatial_lon_min'] = ILATM1b_MDS['longitude'].min()
fileID.attrs['geospatial_lon_max'] = ILATM1b_MDS['longitude'].max()
fileID.attrs['geospatial_lat_units'] = "degrees_north"
fileID.attrs['geospatial_lon_units'] = "degrees_east"
fileID.attrs['geospatial_ellipsoid'] = "WGS84"
fileID.attrs['time_type'] = 'GPS'
fileID.attrs['date_type'] = 'packed_time'
#-- output QFIT header text
if HEADER:
fileID.attrs['header_text'] = HEADER
#-- leap seconds for converting from GPS time to UTC
S = calc_GPS_to_UTC(year,month,day,hour,minute,second)
args = (hour[0],minute[0],second[0]-S[0])
fileID.attrs['RangeBeginningTime']='{0:02.0f}:{1:02.0f}:{2:02.0f}'.format(*args)
args = (hour[1],minute[1],second[1]-S[1])
fileID.attrs['RangeEndingTime']='{0:02.0f}:{1:02.0f}:{2:02.0f}'.format(*args)
args = (year,month,day)
fileID.attrs['RangeBeginningDate'] = '{0:4d}:{1:02d}:{2:02d}'.format(*args)
fileID.attrs['RangeEndingDate'] = '{0:4d}:{1:02d}:{2:02d}'.format(*args)
time_coverage_duration = (second[1]-S[1]) - (second[0]-S[0])
fileID.attrs['DurationTime'] ='{0:0.0f}'.format(time_coverage_duration)
#-- Closing the HDF5 file
fileID.close()
#-- PURPOSE: create argument parser
def arguments():
parser = argparse.ArgumentParser(
description="""Reads IceBridge ATM QFIT binary files directly
from the National Snow and Ice Data Center (NSIDC) and
outputs as HDF5 files
"""
)
#-- command line parameters
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('products',
metavar='PRODUCTS', type=str, nargs='*', default=[],
choices=('ILATM1B','BLATM1B','ILNSA1B'),
help='Level-1b ATM products to convert')
#-- NASA Earthdata credentials
parser.add_argument('--user','-U',
type=str, default=os.environ.get('EARTHDATA_USERNAME'),
help='Username for NASA Earthdata Login')
parser.add_argument('--password','-W',
type=str, default=os.environ.get('EARTHDATA_PASSWORD'),
help='Password for NASA Earthdata Login')
parser.add_argument('--netrc','-N',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.path.join(os.path.expanduser('~'),'.netrc'),
help='Path to .netrc file for authentication')
#-- working data directory
parser.add_argument('--directory','-D',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.getcwd(),
help='Working data directory')
#-- years of data to run
parser.add_argument('--year','-Y',
type=int, nargs='+',
help='Years to run')
#-- subdirectories of data to run
parser.add_argument('--subdirectory','-S',
type=str, nargs='+',
help='subdirectories of data to run')
#-- connection timeout and number of retry attempts
parser.add_argument('--timeout','-T',
type=int, default=120,
help='Timeout in seconds for blocking operations')
parser.add_argument('--retry','-R',
type=int, default=5,
help='Connection retry attempts')
#-- clobber will overwrite the existing data
parser.add_argument('--clobber','-C',
default=False, action='store_true',
help='Overwrite existing data')
#-- verbose output of processing run
parser.add_argument('--verbose','-V',
default=False, action='store_true',
help='Verbose output of run')
#-- permissions mode of the converted files (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='Permissions mode of output files')
# return the parser
return parser
# This is the main part of the program that calls the individual functions
def main():
#-- Read the system arguments listed after the program
parser = arguments()
args,_ = parser.parse_known_args()
#-- NASA Earthdata hostname
URS = 'urs.earthdata.nasa.gov'
#-- build a urllib opener for NASA Earthdata
#-- check internet connection before attempting to run program
opener = ATM1b_QFIT.utilities.attempt_login(URS, username=args.user,
password=args.password, netrc=args.netrc)
#-- create logger for verbosity level
loglevel = logging.INFO if args.verbose else logging.CRITICAL
logging.basicConfig(level=loglevel)
#-- check internet connection before attempting to run program
HOST = 'https://n5eil01u.ecs.nsidc.org/'
if ATM1b_QFIT.utilities.check_connection(HOST):
nsidc_convert_ILATM1b(args.directory, args.products,
YEARS=args.year, SUBDIRECTORY=args.subdirectory,
TIMEOUT=args.timeout, RETRY=args.retry,
CLOBBER=args.clobber, MODE=args.mode)
#-- run main program
if __name__ == '__main__':
main() | ATM1b-QFIT | /ATM1b-QFIT-1.0.1.tar.gz/ATM1b-QFIT-1.0.1/scripts/nsidc_convert_ILATM1b.py | nsidc_convert_ILATM1b.py |
from selenium.webdriver import ActionChains, Keys
from ats.common.log.log import LogTest
from ats.web_test.selenium.selenium_common.driver import DriverCommon
TAG = "KeyboardCommon"
class KeyboardCommon(DriverCommon):
def __init__(self, driver):
super().__init__(driver)
self.keyboard = ActionChains(self.driver)
def keyboard_reset(self):
"""
        Reset the keyboard action chain
:return: None
"""
LogTest.debug(TAG, "Reset keyboard")
self.keyboard.reset_actions()
return None
def keyboard_down(self, key):
"""
        Press down a key
        :param key: key to press
:return: None
"""
LogTest.debug(TAG, "Press down key: {}".format(key))
self.keyboard_reset()
self.keyboard.key_down(key)
self.keyboard.perform()
return None
def keyboard_up(self, key):
"""
        Release a key
        :param key: key to release
:return: None
"""
LogTest.debug(TAG, "Release key: {}".format(key))
self.keyboard_reset()
self.keyboard.key_up(key)
self.keyboard.perform()
return None
def keyboard_press(self, key):
"""
        Press a key and then release it
        :param key: key to press
:return: None
"""
LogTest.debug(TAG, "Press key and release: {}".format(key))
self.keyboard_reset()
self.keyboard.key_down(key).key_up(key)
self.keyboard.perform()
return None
def keyboard_pause(self, seconds):
"""
        Pause keyboard actions
        :param seconds: number of seconds to pause
:return: None
"""
LogTest.debug(TAG, "Keyboard pause {} seconds".format(seconds))
self.keyboard_reset()
self.keyboard.pause(seconds)
self.keyboard.perform()
return None
def keyboard_release(self):
"""
        Release the keyboard
:return: None
"""
LogTest.debug(TAG, "Release keyboard")
self.keyboard_reset()
self.keyboard.release()
self.keyboard.perform()
return None
def keyboard_press_enter(self):
"""
        Press the Enter key
:return: None
"""
LogTest.debug(TAG, "Press enter key")
self.keyboard_press(Keys.ENTER)
return None
def keyboard_press_tab(self):
"""
        Press the Tab key
:return: None
"""
LogTest.debug(TAG, "Press tab key")
self.keyboard_press(Keys.TAB)
return None
def keyboard_press_space(self):
"""
        Press the Space key
:return: None
"""
LogTest.debug(TAG, "Press space key")
self.keyboard_press(Keys.SPACE)
return None
def keyboard_press_escape(self):
"""
        Press the Esc key
:return: None
"""
LogTest.debug(TAG, "Press escape key")
self.keyboard_press(Keys.ESCAPE)
return None
def keyboard_press_backspace(self):
"""
        Press the Backspace key
:return: None
"""
LogTest.debug(TAG, "Press backspace key")
self.keyboard_press(Keys.BACKSPACE)
return None
def keyboard_press_delete(self):
"""
        Press the Delete key
:return: None
"""
LogTest.debug(TAG, "Press delete key")
self.keyboard_press(Keys.DELETE)
return None
def keyboard_press_control(self):
"""
        Press the Ctrl key
:return: None
"""
LogTest.debug(TAG, "Press control key")
self.keyboard_press(Keys.CONTROL)
return None
def keyboard_press_alt(self):
"""
        Press the Alt key
:return: None
"""
LogTest.debug(TAG, "Press alt key")
self.keyboard_press(Keys.ALT)
return None
def keyboard_press_shift(self):
"""
        Press the Shift key
:return: None
"""
LogTest.debug(TAG, "Press shift key")
self.keyboard_press(Keys.SHIFT)
return None
def keyboard_ctrl_a(self, loc):
"""
        Send Ctrl+A (select all) to an element
        :param loc: element locator
:return: None
"""
LogTest.debug(TAG, "Press ctrl+a")
element = self.browser_locate(loc)
element.send_keys(Keys.CONTROL, "a")
return None
def keyboard_ctrl_c(self, loc):
"""
        Send Ctrl+C (copy) to an element
        :param loc: element locator
:return: None
"""
LogTest.debug(TAG, "Press ctrl+c")
element = self.browser_locate(loc)
element.send_keys(Keys.CONTROL, "c")
return None
def keyboard_ctrl_v(self, loc):
"""
        Send Ctrl+V (paste) to an element
        :param loc: element locator
:return: None
"""
LogTest.debug(TAG, "Press ctrl+v")
element = self.browser_locate(loc)
element.send_keys(Keys.CONTROL, "v")
return None
def keyboard_ctrl_x(self, loc):
"""
        Send Ctrl+X (cut) to an element
        :param loc: element locator
:return: None
"""
LogTest.debug(TAG, "Press ctrl+x")
element = self.browser_locate(loc)
element.send_keys(Keys.CONTROL, "x")
return None
def keyboard_ctrl_z(self, loc):
"""
        Send Ctrl+Z (undo) to an element
        :param loc: element locator
:return: None
"""
LogTest.debug(TAG, "Press ctrl+z")
element = self.browser_locate(loc)
element.send_keys(Keys.CONTROL, "z")
return None
def keyboard_ctrl_s(self, loc):
"""
        Send Ctrl+S (save) to an element
        :param loc: element locator
:return: None
"""
LogTest.debug(TAG, "Press ctrl+s")
element = self.browser_locate(loc)
element.send_keys(Keys.CONTROL, "s")
return None
def keyboard_ctrl_f(self, loc):
"""
        Send Ctrl+F (find) to an element
        :param loc: element locator
:return: None
"""
LogTest.debug(TAG, "Press ctrl+f")
element = self.browser_locate(loc)
element.send_keys(Keys.CONTROL, "f")
return None | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/web_test/selenium/selenium_common/keyboard.py | keyboard.py |
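# Usage sketch (illustrative; assumes a selenium webdriver instance and that
# DriverCommon.browser_locate accepts a (By, value) locator tuple):
# from selenium import webdriver
# from selenium.webdriver.common.by import By
# driver = webdriver.Chrome()
# kb = KeyboardCommon(driver)
# kb.keyboard_ctrl_a((By.ID, "editor"))
# kb.keyboard_press_enter()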
import selenium.webdriver.support.expected_conditions as ec
from ats.common.log.log import LogTest
from ats.web_test.selenium.selenium_common.driver import DriverCommon
TAG = "EcCommon"
class EcCommon(DriverCommon):
def __init__(self, driver):
super().__init__(driver)
def verify_title_is(self, title):
"""
        Verify that the page title equals the given title
        :param title: expected page title
:return: True or False
"""
var = ec.title_is(title)(self.driver)
LogTest.debug(TAG, "Verify title is: {}, result: {}".format(title, var))
return var
def verify_title_contains(self, title):
"""
        Verify that the page title contains the given text
        :param title: expected title fragment
:return: True or False
"""
var = ec.title_contains(title)(self.driver)
LogTest.debug(TAG, "Verify title contains: {}, result: {}".format(title, var))
return var
def verify_presence_of_element_located(self, loc):
"""
        Verify that the element is present in the DOM
        :param loc: element locator
:return: True or False
"""
var = ec.presence_of_element_located(loc)(self.driver)
LogTest.debug(TAG, "Verify element located: {}, result: {}".format(loc, var))
return var
def verify_url_contains(self, url):
"""
        Verify that the current URL contains the given fragment
:param url: url
:return: True or False
"""
var = ec.url_contains(url)(self.driver)
LogTest.debug(TAG, "Verify url contains: {}, result: {}".format(url, var))
return var
def verify_url_matches(self, pattern):
"""
        Verify that the current URL matches the given pattern
        :param pattern: regular expression pattern
:return: True or False
"""
var = ec.url_matches(pattern)(self.driver)
LogTest.debug(TAG, "Verify url matches: {}, result: {}".format(pattern, var))
return var
def verify_url_to_be(self, url):
"""
        Verify that the current URL equals the given URL
:param url: url
:return: True or False
"""
var = ec.url_to_be(url)(self.driver)
LogTest.debug(TAG, "Verify url to be: {}, result: {}".format(url, var))
return var
def verify_url_changes(self, url):
"""
        Verify that the current URL differs from the given URL
:param url: url
:return: True or False
"""
var = ec.url_changes(url)(self.driver)
LogTest.debug(TAG, "Verify url changes: {}, result: {}".format(url, var))
return var
def verify_visibility_of(self, loc):
"""
        Verify that the element is visible
        :param loc: element locator
:return: True or False
"""
element = self.browser_locate(loc)
var = ec.visibility_of(element)(self.driver)
LogTest.debug(TAG, "Verify element visibility: {}, result: {}".format(loc, var))
return var
def verify_presence_of_all_elements_located(self, loc):
"""
        Verify that all elements matching the locator are present
        :param loc: element locator
:return: True or False
"""
var = ec.presence_of_all_elements_located(loc)(self.driver)
LogTest.debug(TAG, "Verify all elements located: {}, result: {}".format(loc, var))
return var
def verify_visibility_of_any_elements_located(self, loc):
"""
        Verify that at least one element matching the locator is visible
        :param loc: element locator
:return: True or False
"""
var = ec.visibility_of_any_elements_located(loc)(self.driver)
LogTest.debug(TAG, "Verify any element visibility: {}, result: {}".format(loc, var))
return var
def verify_visibility_of_all_elements_located(self, loc):
"""
        Verify that all elements matching the locator are visible
        :param loc: element locator
:return: True or False
"""
var = ec.visibility_of_all_elements_located(loc)(self.driver)
LogTest.debug(TAG, "Verify all elements visibility: {}, result: {}".format(loc, var))
return var
def verify_text_to_be_present_in_element(self, loc, text):
"""
        Verify that the element's text contains the given text
        :param loc: element locator
        :param text: expected text
:return: True or False
"""
var = ec.text_to_be_present_in_element(loc, text)(self.driver)
LogTest.debug(TAG, "Verify text to be present in element: {}, result: {}".format(loc, var))
return var
def verify_text_to_be_present_in_element_value(self, loc, text):
"""
        Verify that the element's value attribute contains the given text
        :param loc: element locator
        :param text: expected text
:return: True or False
"""
var = ec.text_to_be_present_in_element_value(loc, text)(self.driver)
LogTest.debug(TAG, "Verify text to be present in element value: {}, result: {}".format(loc, var))
return var
def verify_text_to_be_present_in_element_attribute(self, loc, attribute, text):
"""
        Verify that the given element attribute contains the given text
        :param loc: element locator
        :param attribute: attribute name
        :param text: expected text
:return: True or False
"""
var = ec.text_to_be_present_in_element_attribute(loc, attribute, text)(self.driver)
LogTest.debug(TAG, "Verify text to be present in element attribute: {}, result: {}".format(loc, var))
return var
def verify_frame_to_be_available_and_switch_to_it(self, loc):
"""
验证frame可用并切换
:param loc: 元素定位
:return: True or False
"""
var = ec.frame_to_be_available_and_switch_to_it(loc)(self.driver)
LogTest.debug(TAG, "Verify frame to be available and switch to it: {}, result: {}".format(loc, var))
return var
def verify_invisibility_of(self, loc):
"""
验证元素不可见
:param loc: 元素定位
:return: True or False
"""
element = self.browser_locate(loc)
var = ec.invisibility_of_element(element)(self.driver)
LogTest.debug(TAG, "Verify element invisibility: {}, result: {}".format(loc, var))
return var
def verify_element_exists(self, loc):
"""
验证元素存在
:param loc: 元素定位
:return: True or False
"""
var = False
elements = self.driver.find_elements(*loc)
if elements:
var = True
LogTest.debug(TAG, "Verify element exists: {}, result: {}".format(loc, var))
return var
def verify_element_not_exist(self, loc):
"""
验证元素不存在
:param loc: 元素定位
:return: True or False
"""
var = False
elements = self.driver.find_elements(*loc)
if not elements:
var = True
LogTest.debug(TAG, "Verify element not exist: {}, result: {}".format(loc, var))
return var
def verify_element_to_be_clickable(self, loc):
"""
验证元素可点击
:param loc: 元素定位
:return: True or False
"""
element = self.browser_locate(loc)
var = ec.element_to_be_clickable(element)(self.driver)
LogTest.debug(TAG, "Verify element to be clickable: {}, result: {}".format(loc, var))
return var
def verify_staleness_of(self, loc):
"""
验证元素不可用
:param loc: 元素定位
:return: True or False
"""
element = self.browser_locate(loc)
var = ec.staleness_of(element)(self.driver)
LogTest.debug(TAG, "Verify element staleness: {}, result: {}".format(loc, var))
return var
def verify_element_to_be_selected(self, loc):
"""
验证元素被选中
:param loc: 元素定位
:return: True or False
"""
element = self.browser_locate(loc)
var = ec.element_to_be_selected(element)(self.driver)
LogTest.debug(TAG, "Verify element to be selected: {}, result: {}".format(loc, var))
return var
def verify_element_selection_state_to_be(self, loc, state):
"""
验证元素选中状态
:param loc: 元素定位
:param state: 选中状态
:return: True or False
"""
element = self.browser_locate(loc)
var = ec.element_selection_state_to_be(element, state)(self.driver)
LogTest.debug(TAG, "Verify element selection state to be: {}, result: {}".format(loc, var))
return var
def verify_number_of_windows_to_be(self, num):
"""
验证窗口数量
:param num: 窗口数量
:return: True or False
"""
var = ec.number_of_windows_to_be(num)(self.driver)
LogTest.debug(TAG, "Verify number of windows to be: {}, result: {}".format(num, var))
return var
def verify_new_window_is_opened(self, handles):
"""
验证新窗口打开
:param handles: 窗口句柄
:return: True or False
"""
var = ec.new_window_is_opened(handles)(self.driver)
LogTest.debug(TAG, "Verify new window is opened: {}, result: {}".format(handles, var))
return var
def verify_alert_is_present(self):
"""
验证alert弹窗存在
:return: True or False
"""
var = ec.alert_is_present()(self.driver)
LogTest.debug(TAG, "Verify alert is present: {}".format(var))
return var
def verify_element_attribute_to_include(self, loc, attribute):
"""
验证元素属性包含
:param loc: 元素定位
:param attribute: 属性
:return: True or False
"""
element = self.browser_locate(loc)
var = ec.element_attribute_to_include(element, attribute)(self.driver)
LogTest.debug(TAG, "Verify element attribute to include: {}, result: {}".format(loc, var))
return var | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/web_test/selenium/selenium_common/ec.py | ec.py |
from selenium.webdriver.support.select import Select
from ats.common.log.log import LogTest
from ats.web_test.selenium.selenium_common.driver import DriverCommon
TAG = "SelectCommon"
class SelectCommon(DriverCommon):
def __init__(self, driver):
super().__init__(driver)
def get_select_element(self, loc):
"""
获取下拉框元素对象
:param loc: 元素定位
:return: 下拉框元素对象
"""
element = self.browser_locate(loc)
var = Select(element)
LogTest.debug(TAG, "Get select object")
return var
def get_select_options(self, loc):
"""
获取下拉框选项
:param loc: 元素定位
:return: 下拉框选项
"""
select = self.get_select_element(loc)
var = select.options
LogTest.debug(TAG, "Select options: {}".format(var))
return var
def get_selected_options(self, loc):
"""
获取已被选中的下拉框选项
:param loc: 元素定位
:return: 已被选中的下拉框选项
"""
select = self.get_select_element(loc)
var = select.all_selected_options
LogTest.debug(TAG, "All selected options: {}".format(var))
return var
def get_first_selected_option(self, loc):
"""
获取第一个已被选中的下拉框选项
:param loc: 元素定位
:return: 第一个已被选中的下拉框选项
"""
select = self.get_select_element(loc)
var = select.first_selected_option
LogTest.debug(TAG, "First selected option: {}".format(var))
return var
def select_by_value(self, loc, value):
"""
通过value选择下拉框选项
:param loc: 元素定位
:param value: value
:return: None
"""
select = self.get_select_element(loc)
select.select_by_value(value)
LogTest.debug(TAG, "Select by value: {}".format(value))
return None
def select_by_index(self, loc, index):
"""
通过index选择下拉框选项
:param loc: 元素定位
:param index: index
:return: None
"""
select = self.get_select_element(loc)
select.select_by_index(index)
LogTest.debug(TAG, "Select by index: {}".format(index))
return None
def select_by_visible_text(self, loc, text):
"""
通过index选择下拉框选项
:param loc: 元素定位
:param text: text
:return: None
"""
select = self.get_select_element(loc)
select.select_by_visible_text(text)
LogTest.debug(TAG, "Select by text: {}".format(text))
return None
def deselect_all(self, loc):
"""
取消所有选择
:param loc: 元素定位
:return: None
"""
select = self.get_select_element(loc)
select.deselect_all()
LogTest.debug(TAG, "Deselect all")
return None
def deselect_by_value(self, loc, value):
"""
通过value取消选择下拉框选项
:param loc: 元素定位
:param value: value
:return: None
"""
select = self.get_select_element(loc)
select.deselect_by_value(value)
LogTest.debug(TAG, "Deselect by value: {}".format(value))
return None
def deselect_by_index(self, loc, index):
"""
通过index取消选择下拉框选项
:param loc: 元素定位
:param index: index
:return: None
"""
select = self.get_select_element(loc)
select.deselect_by_index(index)
LogTest.debug(TAG, "Deselect by index: {}".format(index))
return None
def deselect_by_visible_text(self, loc, text):
"""
通过index取消选择下拉框选项
:param loc: 元素定位
:param text: text
:return: None
"""
select = self.get_select_element(loc)
select.deselect_by_visible_text(text)
LogTest.debug(TAG, "Deselect by text: {}".format(text))
return None | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/web_test/selenium/selenium_common/select.py | select.py |
from selenium.webdriver import ActionChains
from ats.common.log.log import LogTest
from ats.web_test.selenium.selenium_common.driver import DriverCommon
TAG = "MouseCommon"
class MouseCommon(DriverCommon):
def __init__(self, driver):
super().__init__(driver)
self.mouse = ActionChains(self.driver)
def mouse_reset(self):
"""
重置鼠标
:return: None
"""
LogTest.debug(TAG, "Reset mouse")
self.mouse.reset_actions()
return None
def mouse_click(self, loc):
"""
鼠标左键单击
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse click")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.click(element)
self.mouse.perform()
return None
def mouse_click_and_hold(self, loc):
"""
鼠标左键单击并按住
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse click and hold")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.click_and_hold(element)
self.mouse.perform()
return None
def mouse_right_click(self, loc):
"""
鼠标右键单击
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse right click")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.context_click(element)
self.mouse.perform()
return None
def mouse_double_click(self, loc):
"""
鼠标左键双击
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse double click")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.double_click(element)
self.mouse.perform()
return None
def mouse_double_click_by_screen_offset(self, x_percent, y_percent):
"""
鼠标双击指定位置
:param x_percent: x轴偏移量
:param y_percent: y轴偏移量
:return: None
"""
LogTest.debug(TAG, "Mouse double click by screen offset")
self.mouse_reset()
browser_size = self.driver.get_window_size()
x = browser_size["width"] * x_percent
y = browser_size["height"] * y_percent
self.mouse_move_by_offset(x, y)
self.mouse.double_click()
self.mouse.perform()
return None
def mouse_click_by_screen_offset(self, x_percent, y_percent):
"""
鼠标单击指定位置
:param x_percent: x轴偏移量
:param y_percent: y轴偏移量
:return: None
"""
LogTest.debug(TAG, "Mouse double click by screen offset")
self.mouse_reset()
browser_size = self.driver.get_window_size()
x = browser_size["width"] * x_percent
y = browser_size["height"] * y_percent
self.mouse_move_by_offset(x, y)
self.mouse.click()
self.mouse.perform()
return None
def mouse_drag_and_drop(self, source_loc, target_loc):
"""
鼠标拖拽到指定元素
:param source_loc: 元素定位
:param target_loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse drag and drop")
source = self.browser_locate(source_loc)
target = self.browser_locate(target_loc)
self.mouse_reset()
self.mouse.drag_and_drop(source, target)
self.mouse.perform()
return None
def mouse_drag_and_drop_by_offset(self, loc, x_offset, y_offset):
"""
鼠标拖拽到指定元素的偏移量
:param loc: 元素定位
:param x_offset: x轴偏移量
:param y_offset: y轴偏移量
:return: None
"""
LogTest.debug(TAG, "Mouse drag and drop by offset")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.drag_and_drop_by_offset(element, x_offset, y_offset)
self.mouse.perform()
return None
def mouse_move_by_offset(self, x_offset, y_offset):
"""
鼠标移动到指定位置
:param x_offset: x轴偏移量
:param y_offset: y轴偏移量
:return: None
"""
LogTest.debug(TAG, "Mouse move by offset")
self.mouse_reset()
self.mouse.move_by_offset(x_offset, y_offset)
self.mouse.perform()
return None
def mouse_move_to_element(self, loc):
"""
鼠标移动到元素
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse move to element")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.move_to_element(element)
self.mouse.perform()
return None
def mouse_move_to_element_with_offset(self, loc, x_offset, y_offset):
"""
鼠标移动指定元素的偏移量
:param loc: 元素定位
:param x_offset: x轴偏移量
:param y_offset: y轴偏移量
:return: None
"""
LogTest.debug(TAG, "Mouse move to element with offset")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.move_to_element_with_offset(element, x_offset, y_offset)
self.mouse.perform()
return None
def mouse_pause(self, seconds):
"""
鼠标暂停
:param seconds: 秒数
:return: None
"""
LogTest.debug(TAG, "Mouse pause")
self.mouse_reset()
self.mouse.pause(seconds)
self.mouse.perform()
return None
def mouse_release(self):
"""
鼠标释放
:return: None
"""
LogTest.debug(TAG, "Mouse release")
self.mouse_reset()
self.mouse.release()
self.mouse.perform()
return None
def mouse_move_to_element_center(self, loc):
"""
鼠标移动到元素中心
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse move to element center")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.move_to_element_with_offset(element, element.size["width"] / 2, element.size["height"] / 2)
self.mouse.perform()
return None
def mouse_move_to_element_top_left(self, loc):
"""
鼠标移动到元素左上角
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse move to element top left")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.move_to_element_with_offset(element, 0, 0)
self.mouse.perform()
return None
def mouse_move_to_element_top_right(self, loc):
"""
鼠标移动到元素右上角
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse move to element top right")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.move_to_element_with_offset(element, element.size["width"], 0)
self.mouse.perform()
return None
def mouse_move_to_element_bottom_left(self, loc):
"""
鼠标移动到元素左下角
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse move to element bottom left")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.move_to_element_with_offset(element, 0, element.size["height"])
self.mouse.perform()
return None
def mouse_move_to_element_bottom_right(self, loc):
"""
鼠标移动到元素右下角
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse move to element bottom right")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.move_to_element_with_offset(element, element.size["width"], element.size["height"])
self.mouse.perform()
return None
def mouse_scroll_to_element(self, loc):
"""
鼠标滚动到元素
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Mouse scroll to element")
element = self.browser_locate(loc)
self.mouse_reset()
self.mouse.scroll_to_element(element)
self.mouse.perform()
return None
def mouse_scroll_by_amount(self, x_offset, y_offset):
"""
鼠标滚动指定偏移量
:param x_offset: x轴偏移量
:param y_offset: y轴偏移量
:return: None
"""
LogTest.debug(TAG, "Mouse scroll by amount")
self.mouse_reset()
self.mouse.scroll_by_amount(x_offset, y_offset)
self.mouse.perform()
return None
def mouse_scroll_from_origin(self, scroll_orig, x_offset, y_offset):
"""
鼠标滚动指定偏移量
:param scroll_orig: 滚动起始点
:param x_offset: x轴偏移量
:param y_offset: y轴偏移量
:return: None
"""
LogTest.debug(TAG, "Mouse scroll from origin")
self.mouse_reset()
self.mouse.scroll_from_origin(scroll_orig, x_offset, y_offset)
self.mouse.perform()
return None | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/web_test/selenium/selenium_common/mouse.py | mouse.py |
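# Example usage (illustrative sketch): assumes Selenium 4.2+ and a locally available
# chromedriver; the URL and locators below are placeholders.
if __name__ == "__main__":
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    demo_driver = webdriver.Chrome()
    mouse_common = MouseCommon(demo_driver)
    mouse_common.open_url("https://example.com")
    mouse_common.mouse_move_to_element((By.TAG_NAME, "h1"))
    mouse_common.mouse_scroll_by_amount(0, 200)
    mouse_common.quit_browser()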
from selenium.common import NoAlertPresentException
from ats.common.log.log import LogTest
from ats.web_test.selenium.selenium_common.driver import DriverCommon
TAG = "AlertCommon"
class AlertCommon(DriverCommon):
def __init__(self, driver):
super().__init__(driver)
self.alert = None
def get_alert_text(self):
"""
获取警告框的文本信息
:return: 警告框的文本信息
"""
self.alert = self.switch_to_alert()
var = self.alert.text
LogTest.debug(TAG, "Get alert text, text is: {}".format(var))
return var
def alert_accept(self):
"""
接受警告框
:return: None
"""
LogTest.debug(TAG, "Accept alert")
self.alert = self.switch_to_alert()
self.alert.accept()
return None
def alert_dismiss(self):
"""
取消警告框
:return: None
"""
LogTest.debug(TAG, "Dismiss alert")
self.alert = self.switch_to_alert()
self.alert.dismiss()
return None
def alert_send_keys(self, text):
"""
输入文本到警告框
:param text: 输入的文本
:return: None
"""
LogTest.debug(TAG, "Input text to alert, text is: {}".format(text))
self.alert = self.switch_to_alert()
self.alert.send_keys(text)
return None
def is_alert_exist(self):
"""
判断警告框是否存在
:return: True or False
"""
try:
self.switch_to_alert()
LogTest.debug(TAG, "Alert is exist")
return True
except NoAlertPresentException:
LogTest.error(TAG, "Alert is not exist")
return False
def is_alert_exist_accept(self):
"""
判断警告框是否存在,存在则接受
:return: None
"""
if self.is_alert_exist():
LogTest.debug(TAG, "Alert is exist, accept")
self.alert_accept()
return None
def is_alert_exist_dismiss(self):
"""
判断警告框是否存在,存在则取消
:return: None
"""
if self.is_alert_exist():
LogTest.debug(TAG, "Alert is exist, dismiss")
self.alert_dismiss()
return None
def is_alert_exist_send_keys(self, text):
"""
判断警告框是否存在,存在则输入文本
:param text: 输入的文本
:return: None
"""
if self.is_alert_exist():
LogTest.debug(TAG, "Alert is exist, input text to alert, text is: {}".format(text))
self.alert_send_keys(text)
return None
def is_alert_exist_get_text(self):
"""
判断警告框是否存在,存在则获取文本信息
:return: 警告框的文本信息
"""
if self.is_alert_exist():
var = self.get_alert_text()
LogTest.debug(TAG, "Alert is exist, get alert text, text is: {}".format(var))
return var
return None | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/web_test/selenium/selenium_common/alert.py | alert.py |
from selenium.webdriver.remote.webelement import WebElement
from ats.common.log.log import LogTest
TAG = "DriverCommon"
class DriverCommon(object):
def __init__(self, driver):
self.driver = driver
def get_browser_name(self):
"""
获取浏览器名称
:return: 浏览器名称
"""
var = self.driver.name
LogTest.debug(TAG, "Browser name is: {}".format(var))
return var
def open_url(self, url):
"""
打开网页
:param url: 网页地址
:return: None
"""
LogTest.debug(TAG, "Visit url: {}".format(url))
self.driver.get(url)
return None
def get_page_title(self):
"""
获取网页标题
:return: 网页标题
"""
var = self.driver.title
LogTest.debug(TAG, "Page title is: {}".format(var))
return var
def execute_js(self, script, *args):
"""
执行js脚本
:param script: js脚本
:param args: 参数
:return: 执行结果
"""
var = self.driver.execute_script(script, *args)
LogTest.debug(TAG, "Execute js script: {}".format(script))
return var
def execute_async_js(self, script, *args):
"""
执行异步js脚本
:param script: js脚本
:param args: 参数
:return: 执行结果
"""
var = self.driver.execute_async_script(script, *args)
LogTest.debug(TAG, "Execute async js script: {}".format(script))
return var
def get_current_url(self):
"""
获取当前网页地址
:return: 网页地址
"""
var = self.driver.current_url
LogTest.debug(TAG, "Current url is: {}".format(var))
return var
def get_page_source(self):
"""
获取网页源码
:return: 网页源码
"""
var = self.driver.page_source
LogTest.debug(TAG, "Get page source: {}".format(var))
return var
def close_browser(self):
"""
关闭浏览器
:return: None
"""
LogTest.debug(TAG, "Close browser")
self.driver.close()
return None
def quit_browser(self):
"""
退出浏览器
:return: None
"""
LogTest.debug(TAG, "Quit browser")
self.driver.quit()
return None
def get_current_window_handle(self):
"""
获取当前窗口句柄
:return: 窗口句柄
"""
var = self.driver.current_window_handle
LogTest.debug(TAG, "Current window handle is: {}".format(var))
return var
def get_window_handles(self):
"""
获取所有窗口句柄
:return: 窗口句柄列表
"""
var = self.driver.window_handles
LogTest.debug(TAG, "Window handles is: {}".format(var))
return var
def maximize_window(self):
"""
最大化窗口
:return: None
"""
LogTest.debug(TAG, "Maximize window")
self.driver.maximize_window()
return None
def minimize_window(self):
"""
最小化窗口
:return: None
"""
LogTest.debug(TAG, "Minimize window")
self.driver.minimize_window()
return None
def full_screen_window(self):
"""
全屏窗口
:return: None
"""
LogTest.debug(TAG, "Full screen window")
self.driver.fullscreen_window()
return None
def print_page(self):
"""
打印网页
:return: None
"""
var = self.driver.print_page()
LogTest.debug(TAG, "Print page: {}".format(var))
return var
def switch_to_active_element(self):
"""
切换到活动元素
:return: 活动元素
"""
var = self.driver.switch_to.active_element
LogTest.debug(TAG, "Switch to active element: {}".format(var))
return var
def switch_to_alert(self):
"""
切换到警告框
:return: alert
"""
var = self.driver.switch_to.alert
LogTest.debug(TAG, "Switch to alert: {}".format(var))
return var
def switch_to_default_content(self):
"""
切换回默认内容
:return: None
"""
LogTest.debug(TAG, "Switch to default content")
self.driver.switch_to.default_content()
return None
def switch_to_frame(self, frame_reference):
"""
切换到iframe
:param frame_reference: iframe
:return: None
"""
LogTest.debug(TAG, "Switch to frame: {}".format(frame_reference))
self.driver.switch_to.frame(frame_reference)
return None
def switch_to_parent_frame(self):
"""
切换到父iframe
:return: None
"""
LogTest.debug(TAG, "Switch to parent frame")
self.driver.switch_to.parent_frame()
return None
def switch_to_window(self, window_name):
"""
切换到窗口
:param window_name: 窗口名称
:return: None
"""
LogTest.debug(TAG, "Switch to window: {}".format(window_name))
self.driver.switch_to.window(window_name)
return None
def go_back(self):
"""
后退
:return: None
"""
LogTest.debug(TAG, "Go back")
self.driver.back()
return None
def go_forward(self):
"""
前进
:return: None
"""
LogTest.debug(TAG, "Go forward")
self.driver.forward()
return None
def refresh_browser(self):
"""
刷新
:return: None
"""
LogTest.debug(TAG, "Refresh browser")
self.driver.refresh()
return None
def get_cookies(self):
"""
获取所有的cookie
:return: cookies
"""
var = self.driver.get_cookies()
LogTest.debug(TAG, "Get cookies: {}".format(var))
return var
def get_cookie(self, name):
"""
获取cookie
:param name: cookie名称
:return: cookie
"""
var = self.driver.get_cookie(name)
LogTest.debug(TAG, "Get cookie: {}".format(var))
return var
def add_cookie(self, cookie_dict):
"""
添加cookie
:param cookie_dict: cookie字典
:return: None
"""
LogTest.debug(TAG, "Add cookie: {}".format(cookie_dict))
self.driver.add_cookie(cookie_dict)
return None
def delete_cookie(self, name):
"""
删除cookie
:param name: cookie名称
:return: None
"""
LogTest.debug(TAG, "Delete cookie: {}".format(name))
self.driver.delete_cookie(name)
return None
def delete_all_cookies(self):
"""
删除所有cookie
:return: None
"""
LogTest.debug(TAG, "Delete all cookies")
self.driver.delete_all_cookies()
return None
def set_implicitly_wait(self, seconds):
"""
设置隐式等待时间
:param seconds: 秒
:return: None
"""
LogTest.debug(TAG, "Set implicitly wait: {}s".format(seconds))
self.driver.implicitly_wait(seconds)
return None
def set_script_timeout(self, seconds):
"""
设置脚本超时时间
:param seconds: 秒
:return: None
"""
LogTest.debug(TAG, "Set script timeout: {}s".format(seconds))
self.driver.set_script_timeout(seconds)
return None
def set_page_load_timeout(self, seconds):
"""
设置页面加载超时时间
:param seconds: 秒
:return: None
"""
LogTest.debug(TAG, "Set page load timeout: {}s".format(seconds))
self.driver.set_page_load_timeout(seconds)
return None
def get_timeout(self):
"""
获取超时时间
:return: 超时时间
"""
var = self.driver.timeouts
LogTest.debug(TAG, "Get timeout: {}".format(var))
return var
def browser_locate(self, loc):
"""
浏览器定位
:param loc: 定位
:return: 定位元素
"""
LogTest.debug(TAG, "Browser locate: {}".format(loc))
def _func(_loc):
elements = self.driver.find_elements(*_loc)
if 1 == len(elements):
return elements[0]
return elements
if isinstance(loc, tuple):
loc = _func(loc)
elif isinstance(loc, WebElement):
pass
else:
LogTest.error(TAG, "Loc must be tuple or WebElement")
raise TypeError("loc must be tuple or WebElement")
return loc
def get_desired_capabilities(self):
"""
获取当前浏览器驱动配置
:return: 浏览器配置
"""
var = self.driver.desired_capabilities
LogTest.debug(TAG, "Get desired capabilities: {}".format(var))
return var
def get_capabilities(self):
"""
获取浏览器驱动配置
:return: 浏览器配置
"""
var = self.driver.capabilities
LogTest.debug(TAG, "Get capabilities: {}".format(var))
return var
def get_browser_screenshot_as_file(self, filename):
"""
获取浏览器截图并保存
:param filename: 文件名
:return: None
"""
LogTest.debug(TAG, "Get browser screenshot as file: {}".format(filename))
self.driver.get_screenshot_as_file(filename)
return None
def get_browser_screenshot_as_png(self):
"""
获取浏览器截图二进制数据
:return: 截图
"""
var = self.driver.get_screenshot_as_png()
LogTest.debug(TAG, "Get browser screenshot as png")
return var
def get_browser_screenshot_as_base64(self):
"""
获取浏览器截图base64编码
:return: 截图
"""
var = self.driver.get_screenshot_as_base64()
LogTest.debug(TAG, "Get browser screenshot as base64")
return var
def save_screenshot(self, filename):
"""
保存截图
:param filename: 文件名
:return: None
"""
LogTest.debug(TAG, "Save screenshot: {}".format(filename))
self.driver.save_screenshot(filename)
return None
def set_window_size(self, width, height):
"""
设置窗口大小
:param width: 宽度
:param height: 高度
:return: None
"""
LogTest.debug(TAG, "Set window size: {}x{}".format(width, height))
self.driver.set_window_size(width, height)
return None
def get_window_size(self):
"""
获取窗口大小
:return: 窗口大小
"""
var = self.driver.get_window_size()
LogTest.debug(TAG, "Get window size: {}".format(var))
return var
def set_window_position(self, x, y):
"""
设置窗口位置
:param x: x坐标
:param y: y坐标
:return: None
"""
LogTest.debug(TAG, "Set window position: {},{}".format(x, y))
self.driver.set_window_position(x, y)
return None
def get_window_position(self):
"""
获取窗口位置
:return: 窗口位置
"""
var = self.driver.get_window_position()
LogTest.debug(TAG, "Get window position: {}".format(var))
return var
def get_window_rect(self):
"""
获取窗口位置和大小
:return: 窗口位置和大小
"""
var = self.driver.get_window_rect()
LogTest.debug(TAG, "Get window rect: {}".format(var))
return var
def set_window_rect(self, x, y, width, height):
"""
设置窗口位置和大小
:param x: x坐标
:param y: y坐标
:param width: 宽度
:param height: 高度
:return: None
"""
LogTest.debug(TAG, "Set window rect: {},{},{}x{}".format(x, y, width, height))
self.driver.set_window_rect(x, y, width, height)
return None
def get_orientation(self):
"""
获取屏幕方向
:return: 屏幕方向
"""
var = self.driver.orientation
LogTest.debug(TAG, "Get orientation: {}".format(var))
return var
def get_application_cache(self):
"""
获取应用缓存
:return: 应用缓存
"""
var = self.driver.application_cache
LogTest.debug(TAG, "Get application cache: {}".format(var))
return var
def get_log_types(self):
"""
获取浏览器日志类型
:return: 日志类型
"""
var = self.driver.log_types
LogTest.debug(TAG, "Get log types: {}".format(var))
return var
def get_browser_log(self, log_type):
"""
获取浏览器日志
:param log_type: 日志类型
:return: 日志
"""
var = self.driver.get_log(log_type)
LogTest.debug(TAG, "Get browser log: {}".format(var))
return var | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/web_test/selenium/selenium_common/driver.py | driver.py |
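# Example usage (illustrative sketch): assumes Selenium 4 and a locally available chromedriver.
if __name__ == "__main__":
    from selenium import webdriver
    demo_driver = webdriver.Chrome()
    driver_common = DriverCommon(demo_driver)
    driver_common.open_url("https://example.com")
    print(driver_common.get_page_title())
    print(driver_common.get_current_url())
    driver_common.quit_browser()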
import base64
from ats.common.log.log import LogTest
from ats.common.ocr.ocr_common import OcrCommon
from ats.web_test.selenium.selenium_common.driver import DriverCommon
TAG = "UiCommon"
class UiCommon(DriverCommon):
def __init__(self, driver):
super().__init__(driver)
def ui_get_img_text(self, loc, cut_area=None):
"""
Get the text in an image
:param loc: locator of the image element
:param cut_area: crop area
:return: text recognized in the image
"""
element = self.browser_locate(loc)
image_base64 = element.screenshot_as_base64
image_bytes = base64.b64decode(image_base64)
res = OcrCommon.get_ocr_result(image_bytes, cut_area)
LogTest.debug(TAG, "Get img text, result: {}".format(res))
return res
def ui_get_canvas_text(self, loc, w1=0, w2=1, h1=0, h2=1):
"""
Get the text in a canvas
:param loc: locator of the canvas element
:param w1: crop start as a fraction of the width
:param w2: crop end as a fraction of the width
:param h1: crop start as a fraction of the height
:param h2: crop end as a fraction of the height
:return: text recognized in the canvas
"""
element = self.browser_locate(loc)
width = eval(element.get_attribute("width"))
height = eval(element.get_attribute("height"))
cut_area = (w1 * width, h1 * height, w2 * width, h2 * height)
img_bytes = self.get_canvas_bytes(loc)
res = OcrCommon.get_ocr_result(img_bytes, cut_area)
LogTest.debug(TAG, "Get canvas text, result: {}".format(res))
return res
def get_canvas_bytes(self, loc):
"""
Download the canvas as an image
:param loc: locator of the canvas element
:return: image bytes
"""
element = self.browser_locate(loc)
js = "var canvas = arguments[0];" \
"var context = canvas.getContext('2d');" \
"context.globalCompositeOperation='destination-over';" \
"context.fillStyle='white';" \
"context.fillRect(0,0,canvas.width,canvas.height);" \
"context.globalCompositeOperation='source-over';" \
"return canvas.toDataURL('image/png');"
image_data = self.execute_js(js, element)
image_base64 = image_data.split(",")[1]
image_bytes = base64.b64decode(image_base64)
return image_bytes | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/web_test/selenium/selenium_common/ui.py | ui.py |
from ats.common.log.log import LogTest
from ats.web_test.selenium.selenium_common.driver import DriverCommon
TAG = "ElementCommon"
class ElementCommon(DriverCommon):
def __init__(self, driver):
super().__init__(driver)
def get_tag_name(self, loc):
"""
获取元素的标签名
:param loc: 元素定位
:return: 标签名
"""
element = self.browser_locate(loc)
var = element.tag_name
LogTest.debug(TAG, "Get element {} tag name: {}".format(loc, var))
return var
def get_text(self, loc):
"""
获取元素的文本
:param loc: 元素定位
:return: 元素文本
"""
element = self.browser_locate(loc)
var = element.text
LogTest.debug(TAG, "Get element {} text: {}".format(loc, var))
return var
def click(self, loc):
"""
点击元素
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Click element: {}".format(loc))
element = self.browser_locate(loc)
element.click()
return None
def submit(self, loc):
"""
提交表单
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Submit form")
element = self.browser_locate(loc)
element.submit()
return None
def clear(self, loc):
"""
清除元素内容
:param loc: 元素定位
:return: None
"""
LogTest.debug(TAG, "Clear element")
element = self.browser_locate(loc)
element.clear()
return None
def get_property(self, loc, name):
"""
获取元素的属性值
:param loc: 元素定位
:param name: 属性名
:return: 属性值
"""
element = self.browser_locate(loc)
var = element.get_property(name)
LogTest.debug(TAG, "Get {} property: {}".format(name, var))
return var
def get_dom_attribute(self, loc, name):
"""
获取元素的dom属性值
:param loc: 元素定位
:param name: dom属性名
:return: dom属性值
"""
element = self.browser_locate(loc)
var = element.get_dom_attribute(name)
LogTest.debug(TAG, "Get {} dom attribute: {}".format(name, var))
return var
def get_attribute(self, loc, name):
"""
获取元素的属性值
:param loc: 元素定位
:param name: 属性名
:return: 属性值
"""
element = self.browser_locate(loc)
var = element.get_attribute(name)
LogTest.debug(TAG, "Get {} attribute: {}".format(name, var))
return var
def check_is_selected(self, loc):
"""
检查元素是否被选中
:param loc: 元素定位
:return: True or False
"""
element = self.browser_locate(loc)
var = element.is_selected()
LogTest.debug(TAG, "Is element {} selected: {}".format(loc, var))
return var
def check_is_enabled(self, loc):
"""
检查元素是否可用
:param loc: 元素定位
:return: True or False
"""
element = self.browser_locate(loc)
var = element.is_enabled()
LogTest.debug(TAG, "Is element {} enabled: {}".format(loc, var))
return var
def send_keys(self, loc, var):
"""
向元素输入内容
:param loc: 元素定位
:param var: 输入内容
:return: None
"""
element = self.browser_locate(loc)
element.send_keys(var)
LogTest.debug(TAG, "Input {} to element {}".format(var, loc))
return None
def check_is_displayed(self, loc):
"""
检查元素是否可见
:param loc: 元素定位
:return: True or False
"""
element = self.browser_locate(loc)
var = element.is_displayed()
LogTest.debug(TAG, "Is element {} displayed: {}".format(loc, var))
return var
def scrolled_into_view(self, loc):
"""
滚动到元素可见
:param loc: 元素定位
:return: None
"""
element = self.browser_locate(loc)
var = element.location_once_scrolled_into_view
LogTest.debug(TAG, "Scroll element {} into view: {}".format(loc, var))
return var
def get_size(self, loc):
"""
获取元素的大小
:param loc: 元素定位
:return: 元素大小
"""
element = self.browser_locate(loc)
var = element.size
LogTest.debug(TAG, "Get element {} size: {}".format(loc, var))
return var
def get_css_value(self, loc, name):
"""
获取元素的css属性值
:param loc: 元素定位
:param name: css属性名
:return: css属性值
"""
element = self.browser_locate(loc)
var = element.value_of_css_property(name)
LogTest.debug(TAG, "Get {} css value: {}".format(name, var))
return var
def get_location(self, loc):
"""
获取元素的位置
:param loc: 元素定位
:return: 元素位置
"""
element = self.browser_locate(loc)
var = element.location
LogTest.debug(TAG, "Get element {} location: {}".format(loc, var))
return var
def get_rect(self, loc):
"""
获取元素的位置和大小
:param loc: 元素定位
:return: 元素的位置和大小
"""
element = self.browser_locate(loc)
var = element.rect
LogTest.debug(TAG, "Get element {} rect: {}".format(loc, var))
return var
def screenshot_as_base64(self, loc):
"""
获取元素的截图base64编码
:param loc: 元素定位
:return: 元素截图
"""
element = self.browser_locate(loc)
var = element.screenshot_as_base64
LogTest.debug(TAG, "Get element {} screenshot: {}".format(loc, var))
return var
def screenshot_as_png(self, loc):
"""
获取元素的截图二进制数据
:param loc: 元素定位
:return: 元素截图
"""
element = self.browser_locate(loc)
var = element.screenshot_as_png
LogTest.debug(TAG, "Get element {} screenshot: {}".format(loc, var))
return var
def screenshot(self, loc, filename):
"""
截图
:param loc: 元素定位
:param filename: 截图文件名
:return: None
"""
element = self.browser_locate(loc)
element.screenshot(filename)
LogTest.debug(TAG, "Screenshot element {} to {}".format(loc, filename))
return None
def get_parent(self, loc):
"""
获取元素的父元素
:param loc: 元素定位
:return: 父元素
"""
element = self.browser_locate(loc)
var = element.parent
LogTest.debug(TAG, "Get element {} parent: {}".format(loc, var))
return var
def get_id(self, loc):
"""
获取元素的id
:param loc: 元素定位
:return: 元素id
"""
element = self.browser_locate(loc)
var = element.id
LogTest.debug(TAG, "Get element {} id: {}".format(loc, var))
return var
def element_locate(self, loc, child_loc):
"""
定位元素的子元素
:param loc: 元素定位
:param child_loc: 子元素定位
:return: 子元素
"""
element = self.browser_locate(loc)
child_elements = element.find_elements(*child_loc)
if 1 == len(child_elements):
LogTest.debug(TAG, "Locate element {}'s child element {}".format(loc, child_elements[0]))
return child_elements[0]
LogTest.debug(TAG, "Locate element {}'s child element {}".format(loc, child_elements))
return child_elements | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/web_test/selenium/selenium_common/element.py | element.py |
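# Example usage (illustrative sketch): assumes Selenium 4 and a locally available
# chromedriver; the locator below is a placeholder.
if __name__ == "__main__":
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    demo_driver = webdriver.Chrome()
    element_common = ElementCommon(demo_driver)
    element_common.open_url("https://example.com")
    print(element_common.get_text((By.TAG_NAME, "h1")))
    print(element_common.get_tag_name((By.TAG_NAME, "h1")))
    element_common.quit_browser()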
import requests
class HttpCommon(object):
def __init__(self, url, method, data=None, headers=None, cookies=None, files=None, timeout=10):
self.url = url
self.method = method
self.data = data
self.headers = headers
self.cookies = cookies
self.files = files
self.timeout = timeout
def send_request(self):
"""
发送请求
:return: 响应
"""
if self.method == "get":
response = requests.get(url=self.url, params=self.data, headers=self.headers, cookies=self.cookies,
timeout=self.timeout)
elif self.method == "post":
response = requests.post(url=self.url, data=self.data, headers=self.headers, cookies=self.cookies,
files=self.files, timeout=self.timeout)
elif self.method == "put":
response = requests.put(url=self.url, data=self.data, headers=self.headers, cookies=self.cookies,
timeout=self.timeout)
elif self.method == "delete":
response = requests.delete(url=self.url, data=self.data, headers=self.headers, cookies=self.cookies,
timeout=self.timeout)
else:
response = None
return response
def get_response(self):
"""
获取响应
:return: 响应
"""
response = self.send_request()
return response
def get_response_json(self):
"""
获取响应json
:return: 响应json
"""
response = self.send_request()
return response.json()
def get_response_text(self):
"""
获取响应文本
:return: 响应文本
"""
response = self.send_request()
return response.text
def get_response_status_code(self):
"""
获取响应状态码
:return: 响应状态码
"""
response = self.send_request()
return response.status_code
def get_response_headers(self):
"""
获取响应头
:return: 响应头
"""
response = self.send_request()
return response.headers
def get_response_cookies(self):
"""
获取响应cookies
:return: 响应cookies
"""
response = self.send_request()
return response.cookies
def get_response_content(self):
"""
获取响应内容
:return: 响应内容
"""
response = self.send_request()
return response.content
def get_response_encoding(self):
"""
获取响应编码
:return: 响应编码
"""
response = self.send_request()
return response.encoding
def get_response_reason(self):
"""
获取响应原因
:return: 响应原因
"""
response = self.send_request()
return response.reason
def get_response_is_redirect(self):
"""
获取响应是否重定向
:return: 响应是否重定向
"""
response = self.send_request()
return response.is_redirect
def verify_response_is_permanent_redirect(self):
"""
验证响应是否永久重定向
:return: 响应是否永久重定向
"""
response = self.send_request()
return response.is_permanent_redirect
def verify_response_is_ok(self):
"""
验证响应是否成功
:return: 响应是否成功
"""
response = self.send_request()
return response.ok
def get_response_history(self):
"""
获取响应历史
:return: 响应历史
"""
response = self.send_request()
return response.history
def get_response_links(self):
"""
获取响应链接
:return: 响应链接
"""
response = self.send_request()
return response.links
def get_response_next(self):
"""
获取响应下一个
:return: 响应下一个
"""
response = self.send_request()
return response.next
def get_response_url(self):
"""
获取响应url
:return: 响应url
"""
response = self.send_request()
return response.url
def get_response_elapsed(self):
"""
获取响应时间
:return: 响应时间
"""
response = self.send_request()
return response.elapsed
def get_response_raw(self):
"""
获取响应原始
:return: 响应原始
"""
response = self.send_request()
return response.raw
def get_response_request(self):
"""
获取响应请求
:return: 响应请求
"""
response = self.send_request()
return response.request
def get_response_apparent_encoding(self):
"""
获取响应编码
:return: 响应编码
"""
response = self.send_request()
return response.apparent_encoding
def get_response_close(self):
"""
获取响应关闭
:return: 响应关闭
"""
response = self.send_request()
return response.close | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/api_test/http_common.py | http_common.py |
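# Example usage (illustrative sketch): the URL below is just a public echo service used
# as a placeholder. Note that every get_response_* helper re-sends the request, so the
# sketch keeps it to a couple of calls.
if __name__ == "__main__":
    demo_request = HttpCommon("https://httpbin.org/get", "get", data={"q": "demo"})
    print(demo_request.get_response_status_code())
    print(demo_request.get_response_json())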
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
TAG = "SqlAlchemyCommon"
class SqlAlchemyCommon(object):
@staticmethod
def get_engine(db_type, db_name, db_user=None, db_password=None, db_host=None, db_port=None):
"""
Get a database connection engine
:param db_type: database type
:param db_name: database name
:param db_user: database username
:param db_password: database password
:param db_host: database host
:param db_port: database port
:return: database connection engine
"""
if db_type == "mysql":
engine = create_engine(
"mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8".format(db_user, db_password, db_host, db_port, db_name))
elif db_type == "oracle":
engine = create_engine(
"oracle+cx_oracle://{}:{}@{}:{}/{}".format(db_user, db_password, db_host, db_port, db_name))
elif db_type == "postgresql":
engine = create_engine(
"postgresql+psycopg2://{}:{}@{}:{}/{}".format(db_user, db_password, db_host, db_port, db_name))
elif db_type == "sqlite":
engine = create_engine("sqlite:///{}".format(db_name))
elif db_type == "db2":
engine = create_engine(
"db2+ibm_db://{}:{}@{}:{}/{}".format(db_user, db_password, db_host, db_port, db_name))
else:
raise Exception("Database type error")
return engine
@staticmethod
def get_session(engine):
"""
Get a database session
:param engine: database connection engine
:return: database session
"""
session = sessionmaker(bind=engine)
session = session()
return session
@staticmethod
def get_tab_fields(db_class, db_type, db_name):
"""
Query all records of a table
:param db_class: mapped table class
:param db_type: database type
:param db_name: database name
:return: all records of the table
"""
db = db_class()
engine = SqlAlchemyCommon.get_engine(db_type, db_name)
session = SqlAlchemyCommon.get_session(engine)
d = session.query(db).all()
session.close()
return d | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/common/database/sqlalchemy_common.py | sqlalchemy_common.py |
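# Example usage (illustrative sketch): the SQLite file name below is a placeholder.
if __name__ == "__main__":
    from sqlalchemy import text
    demo_engine = SqlAlchemyCommon.get_engine("sqlite", "demo.db")
    demo_session = SqlAlchemyCommon.get_session(demo_engine)
    print(demo_session.execute(text("SELECT 1")).scalar())
    demo_session.close()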
import configparser
TAG = "ConfCommon"
class ConfCommon(object):
def __init__(self, conf_file):
"""
Initialize the configuration file
:param conf_file: path to the configuration file
"""
self.conf_file = conf_file
self.conf = configparser.ConfigParser()
self.conf.optionxform = lambda option: option
self.conf.read(self.conf_file, encoding="utf-8")
def get_conf(self, section, option):
"""
Get a value from the configuration file
:param section: section name
:param option: option name
:return: value of the option
"""
try:
return self.conf.get(section, option)
except configparser.NoSectionError:
return None
except configparser.NoOptionError:
return None
def set_conf(self, section, option, value):
"""
Set a value in the configuration file
:param section: section name
:param option: option name
:param value: value to assign to the option
:return: None
"""
try:
self.conf.set(section, option, value)
except configparser.NoSectionError:
self.conf.add_section(section)
self.conf.set(section, option, value)
def save_conf(self):
"""
Save the configuration file
:return: None
"""
self.conf.write(open(self.conf_file, mode="w", encoding="utf-8"))
def get_sections(self):
"""
Get all sections in the configuration file
:return: list of sections
"""
return self.conf.sections()
def get_options(self, section):
"""
Get all options of a section in the configuration file
:param section: section name
:return: list of options
"""
return self.conf.options(section)
def get_items(self, section):
"""
Get all items of a section in the configuration file
:param section: section name
:return: list of items
"""
return self.conf.items(section)
def get_all_dict(self):
"""
Get the whole configuration file as a dict
:return: dict of all sections and their options
"""
return dict([(section, dict(self.conf.items(section))) for section in self.conf.sections()])
def get_section_dict(self, section):
"""
Get all contents of a section as a dict
:param section: section name
:return: dict of the section's options
"""
return dict(self.conf.items(section)) | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/common/config/conf_common.py | conf_common.py |
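# Example usage (illustrative sketch): "config.ini" and the section/option names below
# are placeholders.
if __name__ == "__main__":
    demo_conf = ConfCommon("config.ini")
    demo_conf.set_conf("database", "host", "127.0.0.1")
    demo_conf.save_conf()
    print(demo_conf.get_conf("database", "host"))
    print(demo_conf.get_all_dict())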
from ats.common.log.log import LogTest
TAG = "Assertion"
class Assertion(object):
@staticmethod
def assert_equal(actual, expected, message=None):
"""
Assert that two values are equal
:param actual: actual value
:param expected: expected value
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertEquals: actual = {}, expected = {}".format(actual, expected))
if actual == expected:
return
LogTest.error(TAG, "AssertEquals Failed: actual = {}, expected = {}".format(actual, expected))
raise AssertionError(message)
@staticmethod
def assert_not_equal(actual, expected, message=None):
"""
Assert that two values are not equal
:param actual: actual value
:param expected: expected value
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertNotEquals: actual = {}, expected = {}".format(actual, expected))
if actual != expected:
return
LogTest.error(TAG, "AssertNotEquals Failed: actual = {}, expected = {}".format(actual, expected))
raise AssertionError(message)
@staticmethod
def assert_true(condition, message=None):
"""
Assert that a condition is true
:param condition: condition
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertTrue: condition = {}".format(condition))
if condition:
return
LogTest.error(TAG, "AssertTrue Failed: condition = {}".format(condition))
raise AssertionError(message)
@staticmethod
def assert_false(condition, message=None):
"""
Assert that a condition is false
:param condition: condition
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertFalse: condition = {}".format(condition))
if not condition:
return
LogTest.error(TAG, "AssertFalse Failed: condition = {}".format(condition))
raise AssertionError(message)
@staticmethod
def assert_in(member, container, message=None):
"""
Assert that a member is in a container
:param member: member
:param container: container
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertIn: member = {}, container = {}".format(member, container))
if member in container:
return
LogTest.error(TAG, "AssertIn Failed: member = {}, container = {}".format(member, container))
raise AssertionError(message)
@staticmethod
def assert_not_in(member, container, message=None):
"""
Assert that a member is not in a container
:param member: member
:param container: container
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertNotIn: member = {}, container = {}".format(member, container))
if member not in container:
return
LogTest.error(TAG, "AssertNotIn Failed: member = {}, container = {}".format(member, container))
raise AssertionError(message)
@staticmethod
def assert_is(var1, var2, message=None):
"""
Assert that two variables are the same object
:param var1: first variable
:param var2: second variable
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertIs: var1 = {}, var2 = {}".format(var1, var2))
if var1 is var2:
return
LogTest.error(TAG, "AssertIs Failed: var1 = {}, var2 = {}".format(var1, var2))
raise AssertionError(message)
@staticmethod
def assert_is_not(var1, var2, message=None):
"""
Assert that two variables are not the same object
:param var1: first variable
:param var2: second variable
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertIsNot: var1 = {}, var2 = {}".format(var1, var2))
if var1 is not var2:
return
LogTest.error(TAG, "AssertIsNot Failed: var1 = {}, var2 = {}".format(var1, var2))
raise AssertionError(message)
@staticmethod
def assert_is_none(var, message=None):
"""
Assert that a variable is None
:param var: variable
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertNone: var = {}".format(var))
if var is None:
return
LogTest.error(TAG, "AssertNone Failed: var = {}".format(var))
raise AssertionError(message)
@staticmethod
def assert_is_not_none(var, message=None):
"""
Assert that a variable is not None
:param var: variable
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertNotNone: var = {}".format(var))
if var is not None:
return
LogTest.error(TAG, "AssertNotNone Failed: var = {}".format(var))
raise AssertionError(message)
@staticmethod
def assert_is_instance(obj, cls, message=None):
"""
Assert that an object is an instance of the given type
:param obj: object
:param cls: type
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertIsInstance: obj = {}, cls = {}".format(obj, cls))
if isinstance(obj, cls):
return
LogTest.error(TAG, "AssertIsInstance Failed: obj = {}, cls = {}".format(obj, cls))
raise AssertionError(message)
@staticmethod
def assert_not_is_instance(obj, cls, message=None):
"""
Assert that an object is not an instance of the given type
:param obj: object
:param cls: type
:param message: description message
:return: None
"""
LogTest.debug(TAG, "AssertNotIsInstance: obj = {}, cls = {}".format(obj, cls))
if not isinstance(obj, cls):
return
LogTest.error(TAG, "AssertNotIsInstance Failed: obj = {}, cls = {}".format(obj, cls))
raise AssertionError(message) | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/common/assertion/assert_common.py | assert_common.py |
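# Example usage (illustrative sketch): each assert_* call logs the comparison and raises
# AssertionError with the given message when it fails.
if __name__ == "__main__":
    Assertion.assert_equal(1 + 1, 2, "1 + 1 should equal 2")
    Assertion.assert_in("a", "abc", "'a' should be in 'abc'")
    Assertion.assert_is_none(None, "None should be None")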
from ats.common.log.log import LogTest
TAG = "SoftAssert"
class SoftAssert(object):
def __init__(self):
self._verificationErrors = []
def verify_equal(self, actual, expected, message=None):
"""
软断言相等
:param actual: 实际值
:param expected: 期望值
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyEqual: actual = {}, expected = {}".format(actual, expected))
try:
assert actual == expected
except AssertionError:
LogTest.error(TAG, "VerifyEqual Failed: actual = {}, expected = {}".format(actual, expected))
self._verificationErrors.append(message)
def verify_not_equal(self, actual, expected, message=None):
"""
软断言不相等
:param actual: 实际值
:param expected: 期望值
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyNotEqual: actual = {}, expected = {}".format(actual, expected))
try:
assert actual != expected
except AssertionError:
LogTest.error(TAG, "VerifyNotEqual Failed: actual = {}, expected = {}".format(actual, expected))
self._verificationErrors.append(message)
def verify_true(self, condition, message=None):
"""
软断言条件为真
:param condition: 条件
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyTrue: condition = {}".format(condition))
try:
assert condition
except AssertionError:
LogTest.error(TAG, "VerifyTrue Failed: condition = {}".format(condition))
self._verificationErrors.append(message)
def verify_false(self, condition, message=None):
"""
软断言条件为假
:param condition: 条件
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyFalse: condition = {}".format(condition))
try:
assert not condition
except AssertionError:
LogTest.error(TAG, "VerifyFalse Failed: condition = {}".format(condition))
self._verificationErrors.append(message)
def verify_in(self, member, container, message=None):
"""
软断言成员存在于容器中
:param member: 成员
:param container: 容器
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyIn: member = {}, container = {}".format(member, container))
try:
assert member in container
except AssertionError:
LogTest.error(TAG, "VerifyIn Failed: member = {}, container = {}".format(member, container))
self._verificationErrors.append(message)
def verify_not_in(self, member, container, message=None):
"""
软断言成员不存在于容器中
:param member: 成员
:param container: 容器
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyNotIn: member = {}, container = {}".format(member, container))
try:
assert member not in container
except AssertionError:
LogTest.error(TAG, "VerifyNotIn Failed: member = {}, container = {}".format(member, container))
self._verificationErrors.append(message)
def verify_is(self, expr1, expr2, message=None):
"""
软断言两个表达式引用同一个对象
:param expr1: 表达式1
:param expr2: 表达式2
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyIs: expr1 = {}, expr2 = {}".format(expr1, expr2))
try:
assert expr1 is expr2
except AssertionError:
LogTest.error(TAG, "VerifyIs Failed: expr1 = {}, expr2 = {}".format(expr1, expr2))
self._verificationErrors.append(message)
def verify_is_not(self, expr1, expr2, message=None):
"""
软断言两个表达式引用不同的对象
:param expr1: 表达式1
:param expr2: 表达式2
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyIsNot: expr1 = {}, expr2 = {}".format(expr1, expr2))
try:
assert expr1 is not expr2
except AssertionError:
LogTest.error(TAG, "VerifyIsNot Failed: expr1 = {}, expr2 = {}".format(expr1, expr2))
self._verificationErrors.append(message)
def verify_is_none(self, obj, message=None):
"""
软断言对象为None
:param obj: 对象
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyIsNone: obj = {}".format(obj))
try:
assert obj is None
except AssertionError:
LogTest.error(TAG, "VerifyIsNone Failed: obj = {}".format(obj))
self._verificationErrors.append(message)
def verify_is_not_none(self, obj, message=None):
"""
软断言对象不为None
:param obj: 对象
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyIsNotNone: obj = {}".format(obj))
try:
assert obj is not None
except AssertionError:
LogTest.error(TAG, "VerifyIsNotNone Failed: obj = {}".format(obj))
self._verificationErrors.append(message)
def verify_is_instance(self, obj, cls, message=None):
"""
软断言对象是指定类型
:param obj: 对象
:param cls: 类型
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyIsInstance: obj = {}, cls = {}".format(obj, cls))
try:
assert isinstance(obj, cls)
except AssertionError:
LogTest.error(TAG, "VerifyIsInstance Failed: obj = {}, cls = {}".format(obj, cls))
self._verificationErrors.append(message)
def verify_not_is_instance(self, obj, cls, message=None):
"""
软断言对象不是指定类型
:param obj: 对象
:param cls: 类型
:param message: 描述信息
:return: None
"""
LogTest.debug(TAG, "VerifyNotIsInstance: obj = {}, cls = {}".format(obj, cls))
try:
assert not isinstance(obj, cls)
except AssertionError:
LogTest.error(TAG, "VerifyNotIsInstance Failed: obj = {}, cls = {}".format(obj, cls))
self._verificationErrors.append(message)
def verify_errors(self):
"""
软断言,如果有错误,抛出异常
:return: None
"""
if len(self._verificationErrors) > 0:
raise AssertionError("SoftAssert Failed, Errors appeared")
else:
LogTest.debug(TAG, "SoftAssert Passed")
self._verificationErrors.clear() | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/common/assertion/soft_assert_common.py | soft_assert_common.py |
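# Example usage (illustrative sketch): verify_* calls collect failures instead of raising
# immediately; verify_errors() raises at the end if any check failed.
if __name__ == "__main__":
    soft = SoftAssert()
    soft.verify_equal(1, 1, "1 should equal 1")
    soft.verify_true(isinstance("x", str), "'x' should be a str")
    soft.verify_errors()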
from treelib import Tree
TAG = "TreeCommon"
class TreeCommon(object):
@staticmethod
def get_tree_root():
"""
获取树根
:return: 树根
"""
return Tree()
@staticmethod
def create_tree(tree_list):
"""
创建树结构
:param tree_list: 树列表
:return: 树结构
"""
tree = Tree()
for node in tree_list:
tree.create_node(*node)
return tree
@staticmethod
def get_node_path_by_id(tree, node_id):
"""
获取节点路径
:param tree: 树结构
:param node_id: 节点id
:return: 节点路径
"""
path = []
node_tree = tree.rsearch(node_id)
while True:
try:
path.append(next(node_tree))
except StopIteration as e:
break
return path
@staticmethod
def get_node_path_by_tag(tree, node_id):
"""
获取节点路径
:param tree: 树结构
:param node_id: 节点id
:return: 节点路径
"""
path = []
node_tree = tree.rsearch(node_id)
while True:
try:
nodes_id = next(node_tree)
tag_name = TreeCommon.translate_node_id2tag(tree, nodes_id)
path.append(tag_name)
except StopIteration as e:
break
return path
@staticmethod
def translate_node_id2tag(tree, node_id):
"""
节点id转换为tag
:param tree: 树结构
:param node_id: 节点id
:return: 节点tag
"""
node = tree.get_node(node_id)
tag = node.tag
return tag
@staticmethod
def show_tree(tree):
"""
展示树结构
:param tree: 树结构
:return: None
"""
tree.show()
@staticmethod
def get_tree_node(tree, node_id):
"""
获取树节点
:param tree: 树结构
:param node_id: 节点id
:return: 节点
"""
return tree.get_node(node_id)
@staticmethod
def get_tree_node_children(tree, node_id):
"""
获取树节点的子节点
:param tree: 树结构
:param node_id: 节点id
:return: 子节点
"""
return tree.children(node_id)
@staticmethod
def get_tree_node_father(tree, node_id):
"""
获取树节点的父节点
:param tree: 树结构
:param node_id: 节点id
:return: 父节点
"""
return tree.parent(node_id)
@staticmethod
def get_tree_node_siblings(tree, node_id):
"""
获取树节点的兄弟节点
:param tree: 树结构
:param node_id: 节点id
:return: 兄弟节点
"""
return tree.siblings(node_id)
@staticmethod
def get_tree_node_depth(tree, node_id):
"""
获取树节点的深度
:param tree: 树结构
:param node_id: 节点id
:return: 节点深度
"""
return tree.depth(node_id)
@staticmethod
def get_tree_node_height(tree, node_id):
"""
获取树节点的高度
:param tree: 树结构
:param node_id: 节点id
:return: 节点高度
"""
return tree.height(node_id)
@staticmethod
def get_tree_node_size(tree, node_id):
"""
获取树节点的大小
:param tree: 树结构
:param node_id: 节点id
:return: 节点大小
"""
return tree.size(node_id) | ATS-Library | /ATS_Library-1.2-py3-none-any.whl/ats/common/tree/tree_common.py | tree_common.py |
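# Example usage (illustrative sketch): each tuple is passed straight to treelib's
# Tree.create_node(tag, identifier, parent), so the parent must already exist.
if __name__ == "__main__":
    demo_tree = TreeCommon.create_tree([
        ("Root", "root"),
        ("Child A", "a", "root"),
        ("Child B", "b", "root"),
    ])
    TreeCommon.show_tree(demo_tree)
    print(TreeCommon.get_node_path_by_tag(demo_tree, "a"))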
import requests
import json
import base64
import asyncio
class ATSalarm:
def __init__(self, alarmIP, alarmPort, alarmCode, alarmPin, loop):
#init variables
self.alarmIP = alarmIP
self.alarmPort = alarmPort
self.alarmCode = alarmCode
self.alarmPin = alarmPin
self.server = 'https://security.syca.nl/'
self.lastMessage = {}
self.zoneStates = {}
self._loop = loop
#making initial data dict
self.data={'alarmIP':self.alarmIP,'alarmPort':self.alarmPort,'alarmCode':self.alarmCode,'alarmPin':self.alarmPin,'task':''}
self.Connect()
def Connect(self, task="servercheck", zone=""):
self.data['task'] = task
if zone == "":
r = requests.post(self.server, data=self.data, verify=False)
self.koekjes = r.cookies
self.lastMessage = json.loads(r.text)
print (self.lastMessage)
self.status()
else:
# localData to append area
localData = self.data
localData["area"] = zone
r = requests.post(self.server, data=localData, verify=False)
self.koekjes = r.cookies
self.lastMessage = json.loads(r.text)
print (self.lastMessage)
def status(self):
# set task
task = 'status'
self.data['task'] = task
# keep reconnecting till all data is retrieved
#print(self.lastMessage)
while "reconnect" in self.lastMessage:
if self.lastMessage["reconnect"]:
r = requests.post(self.server, data=self.data, verify=False, cookies=self.koekjes)
self.lastMessage = json.loads(r.text)
#print(self.lastMessage)
if "messages" in self.lastMessage:
for message in self.lastMessage["messages"]:
if message["type"] == "data":
if message["status"] == "areaButtons":
self.zoneStates = json.loads(base64.standard_b64decode(message["code"]))
print(self.zoneStates)
print("amount of zones: " + str(len(self.zoneStates)))
for zone in self.zoneStates:
if zone["status"] == 1:
print(zone["name"] + " not armed")
else:
print(zone["name"] + " armed")
else:
break
def arm(self, zone):
self.Connect(task="areaon", zone=zone)
def disarm(self, zone):
self.Connect(task="areaoff", zone=zone) | ATSAPI | /ATSAPI-0.4-py3-none-any.whl/ATSAPI.py | ATSAPI.py |
# ATTACK_THE_ENEMY_RPG
**Attack The Enemy RPG** is an offline RPG where the player can face an infinite number of rounds.
## Executable File
The executable file is downloadable at https://github.com/GlobalCreativeApkDev/INDONESIAN_PROGRAMMERS/blob/main/ATTACK_THE_ENEMY_RPG/ATTACK_THE_ENEMY_RPG/dist/attack_the_enemy_rpg/attack_the_enemy_rpg.
## Source Code
Python code used to create the game is available in https://github.com/GlobalCreativeApkDev/INDONESIAN_PROGRAMMERS/blob/main/ATTACK_THE_ENEMY_RPG/ATTACK_THE_ENEMY_RPG/attack_the_enemy_rpg.py.
## Installation
**pip install ATTACK_THE_ENEMY_RPG**
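If you installed the package with pip, the game can presumably also be started from Python; the import path below is an assumption based on the package layout shown in the Source Code section above.

```
from ATTACK_THE_ENEMY_RPG.attack_the_enemy_rpg import main

main()
```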
## How to Use the Executable File?
First, open the game by double-clicking the file "attack_the_enemy_rpg".
What the executable file looks like is shown in the image below (the file is enclosed in a red rectangle).
**Image 1**

## Getting Started
After you run the game, you will be asked to enter your name. Then, you will be redirected to the main menu.
**Image 2**

## Main Menu
After you enter your name, you will be asked whether you want to continue playing the game or not.
**Image 3**

## Gameplay
You will be able to battle in an infinite number of rounds during gameplay. Losing a round will send you back to the
first round. However, if you win a round of battle, you will proceed to the next round, get stronger, and face a
stronger enemy.
**Image 4**

| ATTACK-THE-ENEMY-RPG | /ATTACK_THE_ENEMY_RPG-1.tar.gz/ATTACK_THE_ENEMY_RPG-1/README.md | README.md |
import sys
import os
import uuid
import copy
import random
import mpmath
from mpmath import mp, mpf
mp.pretty = True
# Creating static function to be used throughout the game.
def clear():
# type: () -> None
if sys.platform.startswith('win'):
os.system('cls') # For Windows System
else:
os.system('clear') # For Linux System
# Creating necessary classes to be used in the game.
class Player:
"""
This class contains attributes of a player in this game.
"""
def __init__(self, name):
# type: (str) -> None
self.player_id: str = str(uuid.uuid1()) # Generate random player ID
self.name: str = name
self.level: int = 1
self.max_hp: mpf = mpf(random.randint(120, 150))
self.curr_hp: mpf = self.max_hp
self.attack_power: mpf = mpf(random.randint(40, 50))
self.defense: mpf = mpf(random.randint(20, 30))
self.crit_chance: mpf = mpf("0.5")
def __str__(self):
# type: () -> str
res: str = "" # initial value
res += "Player ID: " + str(self.player_id) + "\n"
res += "Name: " + str(self.name) + "\n"
res += "Level: " + str(self.level) + "\n"
res += "HP: " + str(self.curr_hp) + "/" + str(self.max_hp) + "\n"
res += "Attack Power: " + str(self.attack_power) + "\n"
res += "Defense: " + str(self.defense) + "\n"
return res
def is_alive(self):
# type: () -> bool
return self.curr_hp > 0
def restore(self):
# type: () -> None
self.curr_hp = self.max_hp
def level_up(self):
# type: () -> None
self.level += 1
self.max_hp *= 2
self.restore()
self.attack_power *= 2
self.defense *= 2
def attack(self, other):
# type: (Player) -> None
is_crit: bool = random.random() < self.crit_chance
raw_damage: mpf = self.attack_power * mpf("2") - other.defense if is_crit else \
self.attack_power - other.defense
damage: mpf = mpf("0") if raw_damage < 0 else raw_damage
other.curr_hp -= damage
print(str(self.name) + " dealt " + str(damage) + " damage on " + str(other.name) + "!")
def clone(self):
# type: () -> Player
return copy.deepcopy(self)
class CPU(Player):
"""
This class contains attributes of a CPU controlled player in this game.
"""
def __init__(self):
# type: () -> None
Player.__init__(self, "CPU")
# Creating main function used to run the game.
def main() -> int:
"""
This main function is used to run the game.
:return: an integer
"""
print("Welcome to 'Attack The Enemy RPG' by 'GlobalCreativeApkDev'.")
print("In this game, your mission is to survive as many rounds as possible.")
name: str = input("Please enter your name: ")
player: Player = Player(name)
cpu: CPU = CPU()
clear()
round_number: int = 1
turn: int = 0
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
continue_playing: str = input("Do you want to continue playing 'Attack The Enemy RPG'? ")
while continue_playing == "Y":
while player.is_alive() and cpu.is_alive():
clear()
print("#################### ROUND " + str(round_number) + " ####################")
print("Player stats:\n" + str(player) + "\n")
print("CPU stats:\n" + str(cpu) + "\n")
turn += 1
if turn % 2 == 1:
print("It is your turn to attack.")
attack: str = input("Enter anything to attack: ")
player.attack(cpu)
else:
print("It is CPU's turn to attack.")
cpu.attack(player)
if not player.is_alive():
clear()
print("GAME OVER!!!! " + str(player.name).upper() + " DIED!!!! YOU REACHED ROUND "
+ str(round_number) + "!!!!")
round_number = 1
turn = 0
player = Player(name)
cpu = CPU()
elif not cpu.is_alive():
clear()
print("YOU WON THE BATTLE IN ROUND " + str(round_number) + "!!!!")
round_number += 1
turn = 0
player_level_ups: int = random.randint(1, 100)
cpu_level_ups: int = random.randint(1, 100)
for i in range(player_level_ups):
player.level_up()
for i in range(cpu_level_ups):
cpu.level_up()
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
continue_playing = input("Do you want to continue playing 'Attack The Enemy RPG'? ")
return 0
if __name__ == '__main__':
main() | ATTACK-THE-ENEMY-RPG | /ATTACK_THE_ENEMY_RPG-1.tar.gz/ATTACK_THE_ENEMY_RPG-1/ATTACK_THE_ENEMY_RPG/attack_the_enemy_rpg.py | attack_the_enemy_rpg.py |
# ATUS Transformer package
[](https://opensource.org/licenses/MIT)
This package contains two functions. One of them transforms the original ATUS activity codes into 11 broad categories:
1. Sleep
2. Personal Care
3. Housework
4. Child Care
5. Adult Care
6. Work and Education
7. Shopping
8. TV Watching
9. Eating
10. Leisure
11. Travel and Other
The second function recodes transformed activity codes into string names.
## Installation
```
pip install ATUS-Transformer
```
## Usage
- There are (for now) 2 functions:
    - activityDictionary(). The argument is a string containing an original ATUS activity code.
    - activityNumberToStringName(). The argument is an integer between 1 and 11.
## Example: activityDictionary
```
import atus_transformer
atus_transformer.activityDictionary("10101")
```
**Output**
```
1
```
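## Example: activityNumberToStringName
A companion example for the second function (mirroring the example above; category 1 corresponds to 'Sleep'):
```
import atus_transformer
atus_transformer.activityNumberToStringName(1)
```
**Output**
```
'Sleep'
```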
## Note
- Contributions are welcome. Contact: [email protected]
## Author
Kamila Kolpashnikova 2021 | ATUS-Transformer | /ATUS_Transformer-1.0.1.tar.gz/ATUS_Transformer-1.0.1/README.md | README.md |
def activityDictionary(activity):
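    """
    Map an original ATUS activity code, given as a string (e.g. "10101"),
    to one of the 11 broad categories, returned as an int between 1 and 11.
    """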
activity_var = {
"10101": 1,
"10102": 1,
"10199": 1,
"10201": 2,
"10299": 2,
"10301": 2,
"10399": 2,
"10401": 2,
"10499": 2,
"10501": 2,
"19999": 2,
"20101": 3,
"20102": 3,
"20103": 3,
"20104": 3,
"20199": 3,
"20201": 3,
"20202": 3,
"20203": 3,
"20299": 3,
"20301": 3,
"20302": 3,
"20303": 3,
"20399": 3,
"20400": 3,
"20401": 3,
"20402": 3,
"20499": 3,
"20500": 3,
"20501": 3,
"20502": 3,
"20599": 3,
"20600": 3,
"20601": 3,
"20602": 3,
"20603": 3,
"20681": 10,
"20699": 3,
"20700": 3,
"20701": 3,
"20799": 3,
"20800": 3,
"20801": 3,
"20899": 3,
"20900": 3,
"20901": 3,
"20902": 3,
"20903": 3,
"20904": 3,
"20905": 3,
"20999": 3,
"29900": 3,
"29999": 3,
"30100": 4,
"30101": 4,
"30102": 4,
"30103": 4,
"30104": 4,
"30105": 4,
"30106": 4,
"30107": 4,
"30108": 4,
"30109": 4,
"30110": 4,
"30111": 4,
"30112": 4,
"30199": 4,
"30200": 4,
"30201": 4,
"30202": 4,
"30203": 4,
"30204": 4,
"30299": 4,
"30300": 4,
"30301": 4,
"30302": 4,
"30303": 4,
"30399": 4,
"40100": 4,
"40101": 4,
"40102": 4,
"40103": 4,
"40104": 4,
"40105": 4,
"40106": 4,
"40107": 4,
"40108": 4,
"40109": 4,
"40110": 4,
"40111": 4,
"40112": 4,
"40199": 4,
"40200": 4,
"40201": 4,
"40202": 4,
"40203": 4,
"40204": 4,
"40299": 4,
"40300": 4,
"40301": 4,
"40302": 4,
"40303": 4,
"40399": 4,
"30186": 4,
"40186": 4,
"30000": 5,
"30400": 5,
"30401": 5,
"30402": 5,
"30403": 5,
"30404": 5,
"30405": 5,
"30499": 5,
"30500": 5,
"30501": 5,
"30502": 5,
"30503": 5,
"30504": 5,
"30599": 5,
"39900": 5,
"39999": 5,
"40000": 5,
"40400": 5,
"40401": 5,
"40402": 5,
"40403": 5,
"40404": 5,
"40405": 5,
"40499": 5,
"40500": 5,
"40501": 5,
"40502": 5,
"40503": 5,
"40504": 5,
"40505": 5,
"40506": 5,
"40507": 5,
"40508": 5,
"40599": 5,
"49900": 5,
"49999": 5,
"50000": 6,
"50100": 6,
"50101": 6,
"50102": 6,
"50103": 6,
"50104": 6,
"50199": 6,
"50200": 6,
"50201": 6,
"50202": 6,
"50203": 6,
"50204": 6,
"50205": 6,
"50299": 6,
"50300": 6,
"50301": 6,
"50302": 6,
"50303": 6,
"50304": 6,
"50305": 6,
"50399": 6,
"50400": 6,
"50401": 6,
"50403": 6,
"50404": 6,
"50405": 6,
"50499": 6,
"59900": 6,
"59999": 6,
"60000": 6,
"60100": 6,
"60101": 6,
"60102": 6,
"60103": 6,
"60104": 6,
"60199": 6,
"60200": 6,
"60201": 6,
"60202": 6,
"60203": 6,
"60204": 6,
"60299": 6,
"60300": 6,
"60301": 6,
"60302": 6,
"60303": 6,
"60399": 6,
"60400": 6,
"60401": 6,
"60402": 6,
"60403": 6,
"60499": 6,
"69900": 6,
"69999": 6,
"50481": 6,
"50389": 6,
"50189": 6,
"60289": 6,
"50289": 6,
"70000": 7,
"70100": 7,
"70101": 7,
"70102": 7,
"70103": 7,
"70104": 7,
"70105": 7,
"70199": 7,
"70200": 7,
"70201": 7,
"70299": 7,
"70300": 7,
"70301": 7,
"70399": 7,
"79900": 7,
"79999": 7,
"80000": 7,
"80100": 7,
"80101": 7,
"80102": 7,
"80199": 7,
"80200": 7,
"80201": 7,
"80202": 7,
"80203": 7,
"80299": 7,
"80300": 7,
"80301": 7,
"80302": 7,
"80399": 7,
"80400": 7,
"80401": 7,
"80402": 7,
"80403": 7,
"80499": 7,
"80500": 7,
"80501": 7,
"80502": 7,
"80599": 7,
"80600": 7,
"80601": 7,
"80602": 7,
"80699": 7,
"80700": 7,
"80701": 7,
"80702": 7,
"80799": 7,
"80800": 7,
"80801": 7,
"80899": 7,
"89900": 7,
"89999": 7,
"90000": 7,
"90100": 7,
"90101": 7,
"90102": 7,
"90103": 7,
"90104": 7,
"90199": 7,
"90200": 7,
"90201": 7,
"90202": 7,
"90299": 7,
"90300": 7,
"90301": 7,
"90302": 7,
"90399": 7,
"90400": 7,
"90401": 7,
"90402": 7,
"90499": 7,
"90500": 7,
"90501": 7,
"90502": 7,
"90599": 7,
"99900": 7,
"99999": 7,
"100000": 7,
"100100": 7,
"100101": 7,
"100102": 7,
"100103": 7,
"100199": 7,
"100200": 7,
"100201": 7,
"100299": 7,
"100300": 7,
"100303": 7,
"100304": 7,
"100399": 7,
"100400": 7,
"100401": 7,
"100499": 7,
"109900": 7,
"109999": 7,
"120303": 8,
"120304": 8,
"110000": 9,
"110100": 9,
"110101": 9,
"110199": 9,
"110200": 9,
"110201": 9,
"110299": 9,
"119900": 9,
"110289": 9,
"119999": 9,
"120000": 10,
"120100": 10,
"120101": 10,
"120199": 10,
"120200": 10,
"120201": 10,
"120202": 10,
"120299": 10,
"120300": 10,
"120301": 10,
"120302": 10,
"120305": 10,
"120306": 10,
"120307": 10,
"120308": 10,
"120309": 10,
"120310": 10,
"120311": 10,
"120312": 10,
"120313": 10,
"120399": 10,
"120400": 10,
"120401": 10,
"120402": 10,
"120403": 10,
"120404": 10,
"120405": 10,
"120499": 10,
"120500": 10,
"120501": 10,
"120502": 10,
"120503": 10,
"120504": 10,
"120599": 10,
"129900": 10,
"129999": 10,
"130000": 10,
"130100": 10,
"130101": 10,
"130102": 10,
"130103": 10,
"130104": 10,
"130105": 10,
"130106": 10,
"130107": 10,
"130108": 10,
"130109": 10,
"130110": 10,
"130111": 10,
"130112": 10,
"130113": 10,
"130114": 10,
"130115": 10,
"130116": 10,
"130117": 10,
"130118": 10,
"130119": 10,
"130120": 10,
"130121": 10,
"130122": 10,
"130123": 10,
"130124": 10,
"130125": 10,
"130126": 10,
"130127": 10,
"130128": 10,
"130129": 10,
"130130": 10,
"130131": 10,
"130132": 10,
"130133": 10,
"130134": 10,
"130135": 10,
"130136": 10,
"130199": 10,
"130200": 10,
"130201": 10,
"130202": 10,
"130203": 10,
"130204": 10,
"130205": 10,
"130206": 10,
"130207": 10,
"130208": 10,
"130209": 10,
"130210": 10,
"130211": 10,
"130212": 10,
"130213": 10,
"130214": 10,
"130215": 10,
"130216": 10,
"130217": 10,
"130218": 10,
"130219": 10,
"130220": 10,
"130221": 10,
"130222": 10,
"130223": 10,
"130224": 10,
"130225": 10,
"130226": 10,
"130227": 10,
"130228": 10,
"130229": 10,
"130230": 10,
"130231": 10,
"130232": 10,
"130299": 10,
"130300": 10,
"130301": 10,
"130302": 10,
"130399": 10,
"130400": 10,
"130401": 10,
"130402": 10,
"130499": 10,
"139900": 10,
"139999": 10,
"140000": 10,
"140100": 10,
"140101": 10,
"140102": 10,
"140103": 10,
"140104": 10,
"140105": 10,
"149900": 10,
"149999": 10,
"150000": 10,
"150100": 10,
"150101": 10,
"150102": 10,
"150103": 10,
"150104": 10,
"150105": 10,
"150106": 10,
"150199": 10,
"150200": 10,
"150201": 10,
"150202": 10,
"150203": 10,
"150204": 10,
"150299": 10,
"150300": 10,
"150301": 10,
"150302": 10,
"150399": 10,
"150400": 10,
"150401": 10,
"150402": 10,
"150499": 10,
"150500": 10,
"150501": 10,
"150599": 10,
"150600": 10,
"150601": 10,
"150602": 10,
"150699": 10,
"150700": 10,
"150701": 10,
"150799": 10,
"150800": 10,
"150801": 10,
"150899": 10,
"159900": 10,
"159999": 10,
"160000": 10,
"160100": 10,
"160101": 10,
"160102": 10,
"160103": 10,
"160104": 10,
"160105": 10,
"160106": 10,
"160107": 10,
"160108": 10,
"160199": 10,
"160200": 10,
"160201": 10,
"160299": 10,
"169900": 10,
"169999": 10,
"159989": 10,
"169989": 10,
"110281": 10,
"100381": 10,
"100383": 10,
"180000": 11,
"180100": 11,
"180101": 11,
"180199": 11,
"180200": 11,
"180201": 11,
"180202": 11,
"180203": 11,
"180204": 11,
"180205": 11,
"180206": 11,
"180207": 11,
"180208": 11,
"180209": 11,
"180280": 11,
"180299": 11,
"180300": 11,
"180301": 11,
"180302": 11,
"180303": 11,
"180304": 11,
"180305": 11,
"180306": 11,
"180307": 11,
"180399": 11,
"180400": 11,
"180401": 11,
"180402": 11,
"180403": 11,
"180404": 11,
"180405": 11,
"180406": 11,
"180407": 11,
"180482": 11,
"180499": 11,
"180500": 11,
"180501": 11,
"180502": 11,
"180503": 11,
"180504": 11,
"180599": 11,
"180600": 11,
"180601": 11,
"180602": 11,
"180603": 11,
"180604": 11,
"180605": 11,
"180699": 11,
"180700": 11,
"180701": 11,
"180702": 11,
"180703": 11,
"180704": 11,
"180705": 11,
"180782": 11,
"180799": 11,
"180800": 11,
"180801": 11,
"180802": 11,
"180803": 11,
"180804": 11,
"180805": 11,
"180806": 11,
"180807": 11,
"180899": 11,
"180900": 11,
"180901": 11,
"180902": 11,
"180903": 11,
"180904": 11,
"180905": 11,
"180999": 11,
"181000": 11,
"181001": 11,
"181002": 11,
"181099": 11,
"181100": 11,
"181101": 11,
"181199": 11,
"181200": 11,
"181201": 11,
"181202": 11,
"181203": 11,
"181204": 11,
"181205": 11,
"181206": 11,
"181283": 11,
"181299": 11,
"181300": 11,
"181301": 11,
"181302": 11,
"181399": 11,
"181400": 11,
"181401": 11,
"181499": 11,
"181500": 11,
"181501": 11,
"181599": 11,
"181600": 11,
"181601": 11,
"181699": 11,
"181800": 11,
"181801": 11,
"181899": 11,
"189900": 11,
"189999": 11,
"180481": 11,
"180381": 11,
"180382": 11,
"181081": 11,
"180589": 11,
"180682": 11,
"500000": 11,
"500100": 11,
"500101": 11,
"500102": 11,
"500103": 11,
"500104": 11,
"500105": 11,
"500106": 11,
"500107": 11,
"509900": 11,
"509989": 11,
"509999": 11}
return activity_var[activity]
def activityNumberToStringName(activity):
act = { 1: 'Sleep',
2: 'Personal Care',
3: 'Housework',
4: 'Child Care',
5: 'Adult Care',
6: 'Work and Education',
7: 'Shopping',
8: 'TV Watching',
9: 'Eating',
10: 'Leisure',
11: 'Travel and Other'
}
return act[activity] | ATUS-Transformer | /ATUS_Transformer-1.0.1.tar.gz/ATUS_Transformer-1.0.1/src/atus_transformer/activityDictionary.py | activityDictionary.py |
Introduction
============
ATpy is a high-level package providing a way to manipulate tables of
astronomical data in a uniform way.
Documentation is available at http://atpy.readthedocs.org/
ATpy is released under an MIT open-source license
Please Note
===========
Much of ATpy’s functionality has now been incorporated into
[Astropy](http://www.astropy.org), and while we will continue to fix bugs, we
are no longer actively developing new features in ATpy, instead focusing our
efforts on Astropy. If you are a new user, and do not need the SQL-querying
functionality, we recommend using
[Astropy Tables](http://docs.astropy.org/en/stable/table/) directly.
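
Quick example
=============

A minimal usage sketch (`catalog.fits` is a hypothetical input file; any of
the supported formats can be read and written in the same way):

    import atpy

    t = atpy.Table('catalog.fits')   # read a table; format auto-detected from the extension
    t.write('catalog.vot')           # write it back out as a VO table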
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/README.md | README.md |
Installing ATpy
===============
Requirements
------------
ATpy requires the following:
- Python 2.6 or later
http://www.python.org
- Numpy 1.5 or later
http://numpy.org/
- Astropy 0.2 or later
http://www.astropy.org
The following packages are optional, but are required to read/write to certain
formats:
- h5py 1.3.0 or later (for HDF5 tables)
http://www.h5py.org
- MySQL-python 1.2.2 or later (for MySQL tables)
http://sourceforge.net/projects/mysql-python
- PyGreSQL 3.8.1 or later (for PostGreSQL tables)
http://www.pygresql.org/
Installation
------------
To install ATpy, simply run:
python setup.py install
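
You can then check that the package imports correctly:

    python -c "import atpy"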
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/INSTALL.md | INSTALL.md |
from __future__ import print_function, division
from distutils import version
import numpy as np
import warnings
import math
import sys
# SQLite
import sqlite3
# SQLite
MySQLdb_minimum_version = version.LooseVersion('1.2.2')
try:
import MySQLdb
import MySQLdb.constants.FIELD_TYPE as mysqlft
if version.LooseVersion(MySQLdb.__version__) < MySQLdb_minimum_version:
raise
MySQLdb_installed = True
except:
MySQLdb_installed = False
mysql_types = {}
if MySQLdb_installed:
for variable in list(dir(mysqlft)):
if variable[0] != '_':
code = mysqlft.__getattribute__(variable)
mysql_types[code] = variable
def _check_MySQLdb_installed():
if not MySQLdb_installed:
raise Exception("Cannot read/write MySQL tables - MySQL-python " + \
MySQLdb_minimum_version.vstring + " or later required")
# SQLite
PyGreSQL_minimum_version = version.LooseVersion('3.8.1')
try:
import pgdb
PyGreSQL_installed = True
except:
PyGreSQL_installed = False
def _check_PyGreSQL_installed():
if not PyGreSQL_installed:
raise Exception("Cannot read/write PostGreSQL tables - PyGreSQL " + \
PyGreSQL_minimum_version.vstring + " or later required")
# Type conversion dictionary
type_dict = {}
type_dict[np.bool_] = "BOOL"
type_dict[np.uint8] = "TINYINT"
type_dict[np.uint16] = "SMALLINT"
type_dict[np.uint32] = "INT"
type_dict[np.uint64] = "BIGINT"
type_dict[np.int8] = "TINYINT"
type_dict[np.int16] = "SMALLINT"
type_dict[np.int32] = "INT"
type_dict[np.int64] = "BIGINT"
type_dict[np.float32] = "FLOAT"
type_dict[np.float64] = "DOUBLE PRECISION"
type_dict[np.str] = "TEXT"
type_dict[np.string_] = "TEXT"
type_dict[str] = "TEXT"
# Reverse type conversion dictionary
type_dict_rev = {}
type_dict_rev['bool'] = np.bool_
type_dict_rev['tiny'] = np.int8
type_dict_rev['tinyint'] = np.int8
type_dict_rev['short'] = np.int16
type_dict_rev['smallint'] = np.int16
type_dict_rev['int2'] = np.int16
type_dict_rev['int'] = np.int32
type_dict_rev['int4'] = np.int32
type_dict_rev['integer'] = np.int32
type_dict_rev['int8'] = np.int64
type_dict_rev['bigint'] = np.int64
type_dict_rev['long'] = np.int64
type_dict_rev['longlong'] = np.int64
type_dict_rev['float'] = np.float32
type_dict_rev['float4'] = np.float32
type_dict_rev['float8'] = np.float64
type_dict_rev['double'] = np.float64
type_dict_rev['double precision'] = np.float64
type_dict_rev['real'] = np.float64
type_dict_rev['text'] = np.str
type_dict_rev['varchar'] = np.str
type_dict_rev['blob'] = np.str
type_dict_rev['timestamp'] = np.str
type_dict_rev['datetime'] = np.str
type_dict_rev['date'] = np.str
type_dict_rev['var_string'] = np.str
type_dict_rev['decimal'] = np.str
type_dict_rev['numeric'] = np.str
type_dict_rev['enum'] = np.str
# Define symbol to use in insert statement
insert_symbol = {}
insert_symbol['sqlite'] = "?"
insert_symbol['mysql'] = "%s"
insert_symbol['postgres'] = "%s"
# Define quote symbol for column names
quote = {}
quote['sqlite'] = '`'
quote['mysql'] = '`'
quote['postgres'] = '"'
def numpy_type(sql_type):
'''
Returns the numpy type corresponding to an SQL type
Required arguments:
*sql_type*: [ string ]
The SQL type to find the numpy type for
'''
unsigned = 'unsigned' in sql_type
sql_type = sql_type.split('(')[0].lower()
if not sql_type in type_dict_rev:
print("WARNING: need to define reverse type for " + str(sql_type))
print(" Please report this on the ATpy forums!")
print(" This type has been converted to a string")
sql_type = 'text'
dtype = type_dict_rev[sql_type]
if unsigned:
if dtype == np.int8:
return np.uint8
elif dtype == np.int16:
return np.uint16
elif dtype == np.int32:
return np.uint32
elif dtype == np.int64:
return np.uint64
else:
raise Exception("Unexpected unsigned attribute for non-integer column")
else:
return dtype
def list_tables(cursor, dbtype):
'''
List all tables in a given SQL database
Required Arguments:
*cursor*: [ DB API cursor object ]
A cursor for the current database in the DB API formalism
*dbtype*: [ 'sqlite' | 'mysql' | 'postgres' ]
The type of database
'''
tables = {}
if dbtype=='sqlite':
table_names = cursor.execute("select name from sqlite_master where \
type = 'table'").fetchall()
if len(table_names) == 1:
table_names = table_names[0]
for i, table_name in enumerate(table_names):
if type(table_name) == tuple:
table_name = table_name[0]
if sys.version_info[0] > 2:
tables[table_name] = table_name
else:
tables[str(table_name.encode())] = str(table_name.encode())
elif dbtype=='mysql':
cursor.execute('SHOW TABLES;')
for i, table_name in enumerate(cursor):
tables[str(table_name[0])] = str(table_name[0])
elif dbtype=='postgres':
cursor.execute("SELECT table_name FROM information_schema.tables \
WHERE table_schema NOT IN ('pg_catalog', 'information_schema');")
for i, table_name in enumerate(cursor.fetchall()):
tables[str(table_name[0])] = str(table_name[0])
else:
raise Exception('dbtype should be one of sqlite/mysql/postgres')
return tables
def column_info(cursor, dbtype, table_name):
'''
List all columns in a given SQL table
Required Arguments:
*cursor*: [ DB API cursor object ]
A cursor for the current database in the DB API formalism
*dbtype*: [ 'sqlite' | 'mysql' | 'postgres' ]
The type of database
*table_name*: [ string ]
The name of the table to get column information about
'''
names, types, primary_keys = [], [], []
if dbtype=='sqlite':
for column in cursor.execute('pragma table_info(' + \
table_name + ')').fetchall():
names.append(str(column[1]))
if "INT" in column[2]:
types.append(np.int64)
else:
types.append(numpy_type(column[2]))
if column[5] == 1:
primary_keys.append(str(column[1]))
elif dbtype=='mysql':
cursor.execute('DESCRIBE ' + table_name)
for column in cursor:
types.append(numpy_type(column[1]))
names.append(str(column[0]))
if column[3] == 'PRI':
primary_keys.append(str(column[0]))
elif dbtype=='postgres':
cursor.execute('SELECT * FROM ' + table_name + ' WHERE 1=0')
for column in cursor.description:
types.append(numpy_type(column[1]))
names.append(str(column[0]))
return names, types, primary_keys
def column_info_desc(dbtype, description, column_types_dict):
names, types = [], []
if dbtype=='sqlite':
for column in description:
names.append(column[0])
types.append(column_types_dict[column[0]])
elif dbtype=='mysql':
for column in description:
names.append(column[0])
types.append(numpy_type(mysql_types[column[1]]))
elif dbtype=='postgres':
for column in description:
names.append(column[0])
types.append(numpy_type(column[1]))
return names, types
def connect_database(dbtype, *args, **kwargs):
'''
Connect to a database and return a connection handle
Required Arguments:
*dbtype*: [ 'sqlite' | 'mysql' | 'postgres' ]
The type of database
All other arguments are passed to the relevant modules, specifically:
- sqlite3.connect() for SQLite
- MySQLdb.connect() for MySQL
- pgdb.connect() for PostgreSQL
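
    Example (a minimal sketch; 'mytable.db' is a hypothetical SQLite file):

        >>> connection, cursor = connect_database('sqlite', 'mytable.db')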
'''
if dbtype == 'sqlite':
connection = sqlite3.connect(*args, **kwargs)
elif dbtype == 'mysql':
_check_MySQLdb_installed()
connection = MySQLdb.connect(*args, **kwargs)
elif dbtype == 'postgres':
_check_PyGreSQL_installed()
connection = pgdb.connect(*args, **kwargs)
else:
raise Exception('dbtype should be one of sqlite/mysql/postgres')
cursor = connection.cursor()
return connection, cursor
def drop_table(cursor, table_name):
'''
    Drop a table from a given SQL database
Required Arguments:
*cursor*: [ DB API cursor object ]
A cursor for the current database in the DB API formalism
*table_name*: [ string ]
The name of the table to get column information about
'''
cursor.execute('DROP TABLE ' + table_name + ';')
return
def create_table(cursor, dbtype, table_name, columns, primary_key=None):
'''
Create a table in an SQL database
Required Arguments:
*cursor*: [ DB API cursor object ]
A cursor for the current database in the DB API formalism
*dbtype*: [ 'sqlite' | 'mysql' | 'postgres' ]
The type of database
*table_name*: [ string ]
The name of the table to get column information about
*columns*: [ list of tuples ]
The names and types of all the columns
Optional Arguments:
*primary_key* [ string ]
The column to use as a primary key
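
    Example (a minimal sketch; 'photometry', 'id' and 'flux' are hypothetical
    names, and the column types are numpy types as expected by this function):

        >>> create_table(cursor, 'sqlite', 'photometry',
        ...              [('id', np.int32), ('flux', np.float32)],
        ...              primary_key='id')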
'''
query = 'create table ' + table_name + ' ('
for i, column in enumerate(columns):
if i > 0:
query += ", "
column_name = column[0]
column_type = type_dict[column[1]]
# PostgreSQL does not support TINYINT
if dbtype == 'postgres' and column_type == 'TINYINT':
column_type = 'SMALLINT'
# PostgreSQL does not support unsigned integers
if dbtype == 'postgres':
if column[1] == np.uint16:
warnings.warn("uint16 unsupported - converting to int32")
column_type = type_dict[np.int32]
elif column[1] == np.uint32:
warnings.warn("uint32 unsupported - converting to int64")
column_type = type_dict[np.int64]
elif column[1] == np.uint64:
raise Exception("uint64 unsupported")
# MySQL can take an UNSIGNED attribute
if dbtype == 'mysql' and column[1] in [np.uint8, np.uint16, np.uint32, np.uint64]:
column_type += " UNSIGNED"
# SQLite only has one integer type
if dbtype == 'sqlite' and "INT" in column_type:
column_type = "INTEGER"
# SQLite doesn't support uint64
if dbtype == 'sqlite' and column[1] == np.uint64:
raise Exception("SQLite tables do not support unsigned 64-bit ints")
if dbtype == 'postgres' and column[1] == np.float32:
column_type = "REAL"
query += quote[dbtype] + column_name + quote[dbtype] + " " + \
column_type
if primary_key:
query += ", PRIMARY KEY (%s%s%s)" % \
(quote[dbtype],primary_key,quote[dbtype])
query += ")"
cursor.execute(query)
return
def insert_row(cursor, dbtype, table_name, row, fixnan=False):
'''
Insert a row into an SQL database (assumes all columns are specified)
Required Arguments:
*cursor*: [ DB API cursor object ]
A cursor for the current database in the DB API formalism
*dbtype*: [ 'sqlite' | 'mysql' | 'postgres' ]
The type of database
*table_name*: [ string ]
The name of the table to get column information about
*row*: [ tuple ]
A tuple containing all the values to insert into the row
'''
query = 'insert into ' + table_name + ' values ('
query += (insert_symbol[dbtype] + ', ') * (len(row) - 1)
query += insert_symbol[dbtype] + ")"
if fixnan:
if dbtype=='postgres':
for i,e in enumerate(row):
if type(e) == float:
if math.isnan(e):
row[i] = str(e)
elif dbtype=='mysql':
for i,e in enumerate(row):
if type(e) == float:
if math.isnan(e):
row[i] = None
cursor.execute(query, row)
return | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/sqlhelper.py | sqlhelper.py |
from __future__ import print_function, division
import os
import sys
import numpy as np
import warnings
from .helpers import smart_mask, format_length
from .decorators import auto_download_to_file, auto_decompress_to_fileobj, auto_fileobj_to_file
# Define type conversion from IPAC table to numpy arrays
type_dict = {}
type_dict['i'] = np.int64
type_dict['int'] = np.int64
type_dict['integer'] = np.int64
type_dict['long'] = np.int64
type_dict['double'] = np.float64
type_dict['float'] = np.float32
type_dict['real'] = np.float32
type_dict['char'] = np.str
type_dict['date'] = np.str
type_rev_dict = {}
type_rev_dict[np.bool_] = "int"
type_rev_dict[np.int8] = "int"
type_rev_dict[np.int16] = "int"
type_rev_dict[np.int32] = "int"
type_rev_dict[np.int64] = "int"
type_rev_dict[np.uint8] = "int"
type_rev_dict[np.uint16] = "int"
type_rev_dict[np.uint32] = "int"
type_rev_dict[np.uint64] = "int"
type_rev_dict[np.float32] = "float"
type_rev_dict[np.float64] = "double"
type_rev_dict[np.str] = "char"
type_rev_dict[np.string_] = "char"
type_rev_dict[str] = "char"
invalid = {}
invalid[np.int32] = -np.int64(2**31-1)
invalid[np.int64] = -np.int64(2**63-1)
invalid[np.float32] = np.float32(np.nan)
invalid[np.float64] = np.float64(np.nan)
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read(self, filename, definition=3, verbose=False, smart_typing=False):
'''
Read a table from a IPAC file
Required Arguments:
*filename*: [ string ]
The IPAC file to read the table from
Optional Keyword Arguments:
*definition*: [ 1 | 2 | 3 ]
The definition to use to read IPAC tables:
1: any character below a pipe symbol belongs to the
column on the left, and any characters below the
first pipe symbol belong to the first column.
2: any character below a pipe symbol belongs to the
column on the right.
3: no characters should be present below the pipe
symbols (default).
*smart_typing*: [ True | False ]
Whether to try and save memory by using the smallest
integer type that can contain a column. For example,
a column containing only values between 0 and 255 can
be stored as an unsigned 8-bit integer column. The
default is false, so that all integer columns are
stored as 64-bit integers.
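
    Example (a minimal sketch, assuming the usual ATpy Table dispatch;
    'sources.tbl' is a hypothetical IPAC table file):

        >>> import atpy
        >>> t = atpy.Table('sources.tbl', type='ipac')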
'''
if not definition in [1, 2, 3]:
raise Exception("definition should be one of 1/2/3")
self.reset()
# Open file for reading
f = open(filename, 'r')
line = f.readline()
# Read in comments and keywords
while True:
char1 = line[0:1]
char2 = line[1:2]
if char1 != '\\':
break
if char2==' ' or not '=' in line: # comment
self.add_comment(line[1:])
else: # keyword
pos = line.index('=')
key, value = line[1:pos], line[pos + 1:]
value = value.replace("'", "").replace('"', '')
key, value = key.strip(), value.strip()
self.add_keyword(key, value)
line = f.readline()
# Column headers
l = 0
units = {}
nulls = {}
while True:
char1 = line[0:1]
if char1 != "|":
break
if l==0: # Column names
line = line.replace('-', ' ').strip()
# Find all pipe symbols
pipes = []
for i, c in enumerate(line):
if c=='|':
pipes.append(i)
# Find all names
names = line.replace(" ", "").split("|")[1:-1]
elif l==1: # Data types
line = line.replace('-', ' ').strip()
types = dict(zip(names, \
line.replace(" ", "").split("|")[1:-1]))
elif l==2: # Units
units = dict(zip(names, \
line.replace(" ", "").split("|")[1:-1]))
else: # Null values
nulls = dict(zip(names, \
line.replace(" ", "").split("|")[1:-1]))
line = f.readline()
l = l + 1
if len(pipes) != len(names) + 1:
raise "An error occured while reading the IPAC table"
if len(units)==0:
for name in names:
units[name]=''
if len(nulls)==0:
nulls_given = False
for name in names:
nulls[name]=''
else:
nulls_given = True
# Pre-compute numpy column types
numpy_types = {}
for name in names:
numpy_types[name] = type_dict[types[name]]
# Data
array = {}
for name in names:
array[name] = []
while True:
if line.strip() == '':
break
for i in range(len(pipes)-1):
first, last = pipes[i] + 1, pipes[i + 1]
if definition==1:
last = last + 1
if first==1:
first=0
elif definition==2:
first = first - 1
if i + 1==len(pipes)-1:
item = line[first:].strip()
else:
item = line[first:last].strip()
if item.lower() == 'null' and nulls[names[i]] != 'null':
if nulls[names[i]] == '':
if verbose:
warnings.warn("WARNING: found unexpected 'null' value. Setting null value for column "+names[i]+" to 'null'")
nulls[names[i]] = 'null'
nulls_given = True
else:
raise Exception("null value for column "+names[i]+" is set to "+nulls[i]+" but found value 'null'")
array[names[i]].append(item)
line = f.readline()
# Check that null values are of the correct type
if nulls_given:
for name in names:
try:
n = numpy_types[name](nulls[name])
nulls[name] = n
except:
n = invalid[numpy_types[name]]
for i, item in enumerate(array[name]):
if item == nulls[name]:
array[name][i] = n
if verbose:
if len(str(nulls[name]).strip()) == 0:
warnings.warn("WARNING: empty null value for column "+name+" set to "+str(n))
else:
warnings.warn("WARNING: null value for column "+name+" changed from "+str(nulls[name])+" to "+str(n))
nulls[name] = n
# Convert to numpy arrays
for name in names:
if smart_typing:
dtype = None
low = min(array[name])
high = max(array[name])
if types[name] in ['i', 'int', 'integer']:
                low, high = int(low), int(high)
for nt in [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64, np.int64]:
if low >= np.iinfo(nt).min and high <= np.iinfo(nt).max:
dtype = nt
break
elif types[name] in ['long']:
                low, high = int(low), int(high)
for nt in [np.uint64, np.int64]:
if low >= np.iinfo(nt).min and high <= np.iinfo(nt).max:
dtype = nt
break
elif types[name] in ['float', 'real']:
low, high = float(low), float(high)
for nt in [np.float32, np.float64]:
if low >= np.finfo(nt).min and high <= np.finfo(nt).max:
dtype = nt
break
else:
dtype = type_dict[types[name]]
else:
dtype = type_dict[types[name]]
# If max integer is larger than 2**63 then use uint64
if dtype == np.int64:
            if max([int(x) for x in array[name]]) > 2**63:
dtype = np.uint64
warnings.warn("using type uint64 for column %s" % name)
array[name] = np.array(array[name], dtype=dtype)
if smart_typing:
            if np.min(array[name]) >= 0 and np.max(array[name]) <= 1:
                array[name] = array[name] == 1
if self._masked:
self.add_column(name, array[name], \
mask=smart_mask(array[name], nulls[name]), unit=units[name], \
fill=nulls[name])
else:
self.add_column(name, array[name], \
null=nulls[name], unit=units[name])
def write(self, filename, overwrite=False):
'''
Write the table to an IPAC file
Required Arguments:
*filename*: [ string ]
The IPAC file to write the table to
'''
self._raise_vector_columns()
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
# Open file for writing
f = open(filename, 'w')
for key in self.keywords:
value = self.keywords[key]
f.write("\\" + key + "=" + str(value) + "\n")
for comment in self.comments:
f.write("\\ " + comment + "\n")
# Compute width of all columns
width = {}
format = {}
line_names = ""
line_types = ""
line_units = ""
line_nulls = ""
width = {}
for name in self.names:
dtype = self.columns[name].dtype
coltype = type_rev_dict[dtype.type]
colunit = self.columns[name].unit
if self._masked:
colnull = self.data[name].fill_value
else:
colnull = self.columns[name].null
if colnull:
colnull = ("%" + self.columns[name].format) % colnull
else:
colnull = ''
# Adjust the format for each column
width[name] = format_length(self.columns[name].format)
max_width = max(len(name), len(coltype), len(colunit), \
len(colnull))
if max_width > width[name]:
width[name] = max_width
sf = "%" + str(width[name]) + "s"
line_names = line_names + "|" + (sf % name)
line_types = line_types + "|" + (sf % coltype)
line_units = line_units + "|" + (sf % colunit)
line_nulls = line_nulls + "|" + (sf % colnull)
line_names = line_names + "|\n"
line_types = line_types + "|\n"
line_units = line_units + "|\n"
line_nulls = line_nulls + "|\n"
f.write(line_names)
f.write(line_types)
if len(line_units.replace("|", "").strip()) > 0:
f.write(line_units)
if len(line_nulls.replace("|", "").strip()) > 0:
f.write(line_nulls)
for i in range(self.__len__()):
line = ""
for name in self.names:
if self.columns[name].dtype == np.uint64:
item = (("%" + self.columns[name].format) % long(self.data[name][i]))
elif sys.version_info[0] >= 3 and self.columns[name].dtype.type == np.bytes_:
item = (("%" + self.columns[name].format) % self.data[name][i].decode('utf-8'))
else:
item = (("%" + self.columns[name].format) % self.data[name][i])
item = ("%" + str(width[name]) + "s") % item
if len(item) > width[name]:
raise Exception('format for column %s (%s) is not wide enough to contain data' % (name, self.columns[name].format))
line = line + " " + item
line = line + " \n"
f.write(line)
f.close() | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/ipactable.py | ipactable.py |
from __future__ import print_function, division
import sys
if sys.version_info[0] > 2:
    from urllib.request import Request, urlopen
    basestring = str  # Python 3 compatibility for the isinstance() checks below
else:
    from urllib2 import Request, urlopen
import tempfile
import gzip
import bz2
from .decorator import decorator
def auto_download_to_file(f):
return decorator(_auto_download_to_file, f)
def _auto_download_to_file(read, table, filename, *args, **kwargs):
if isinstance(filename, basestring):
# Check whether filename is in fact a URL
for protocol in ['http', 'ftp']:
if filename.lower().startswith('%s://' % protocol):
# Retrieve file
req = Request(filename)
response = urlopen(req)
result = response.read()
# Write it out to a temporary file
output = tempfile.NamedTemporaryFile()
output.write(result)
output.flush()
# Call read method
return read(table, output.name, *args, **kwargs)
# Otherwise just proceed as usual
return read(table, filename, *args, **kwargs)
def auto_decompress_to_fileobj(f):
return decorator(_auto_decompress_to_fileobj, f)
def _auto_decompress_to_fileobj(read, table, filename, *args, **kwargs):
if isinstance(filename, basestring):
# Read in first few characters from file to determine compression
header = open(filename, 'rb').read(4)
        if header[:2] == b'\x1f\x8b':  # gzip compression
            return read(table, gzip.GzipFile(filename), *args, **kwargs)
        elif header[:3] == b'BZh':  # bzip2 compression
            return read(table, bz2.BZ2File(filename), *args, **kwargs)
else:
return read(table, filename, *args, **kwargs)
return read(table, filename, *args, **kwargs)
def auto_fileobj_to_file(f):
return decorator(_auto_fileobj_to_file, f)
def _auto_fileobj_to_file(read, table, filename, *args, **kwargs):
if hasattr(filename, 'read'): # is a file object
# Write it out to a temporary file
output = tempfile.NamedTemporaryFile()
output.write(filename.read())
output.flush()
# Update filename
filename = output.name
return read(table, filename, *args, **kwargs) | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/decorators.py | decorators.py |
from __future__ import print_function, division
import warnings
import sys
if sys.version_info[0] > 2:
    from urllib.request import Request, urlopen
    from urllib.parse import quote_plus
else:
    from urllib2 import Request, urlopen
    from urllib import quote_plus
import tempfile
import string
from xml.etree.ElementTree import ElementTree
'''
API from
http://irsa.ipac.caltech.edu/applications/Gator/GatorAid/irsa/catsearch.html
The URL of the IRSA catalog query service, CatQuery, is
http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query
The service accepts the following keywords, which are analogous to the search
fields on the Gator search form:
spatial Required Type of spatial query: Cone, Box, Polygon, and NONE
polygon Convex polygon of ra dec pairs, separated by comma(,)
Required if spatial=polygon
radius Cone search radius
Optional if spatial=Cone, otherwise ignore it
(default 10 arcsec)
radunits Units of a Cone search: arcsec, arcmin, deg.
Optional if spatial=Cone
(default='arcsec')
size Width of a box in arcsec
Required if spatial=Box.
objstr Target name or coordinate of the center of a spatial
search center. Target names must be resolved by
SIMBAD or NED.
Required only when spatial=Cone or spatial=Box.
Examples: 'M31'
'00 42 44.3 -41 16 08'
'00h42m44.3s -41d16m08s'
catalog Required Catalog name in the IRSA database management system.
outfmt Optional Defines query's output format.
6 - returns a program interface in XML
3 - returns a VO Table (XML)
2 - returns SVC message
1 - returns an ASCII table
0 - returns Gator Status Page in HTML (default)
desc Optional Short description of a specific catalog, which will
appear in the result page.
order Optional Results ordered by this column.
constraint Optional User defined query constraint(s)
Note: The constraint should follow SQL syntax.
onlist Optional 1 - catalog is visible through Gator web interface
(default)
0 - catalog has been ingested into IRSA but not yet
visible through web interface.
This parameter will generally only be set to 0 when
users are supporting testing and evaluation of new
catalogs at IRSA's request.
If onlist=0, the following parameters are required:
server Symbolic DataBase Management Server (DBMS) name
database Name of Database.
ddfile The data dictionary file is used to get column
information for a specific catalog.
selcols Target column list with value separated by a comma(,)
The input list always overwrites default selections
defined by a data dictionary.
outrows Number of rows retrieved from database.
The retrieved row number outrows is always less than or
equal to available to be retrieved rows under the same
constraints.
'''
def read(self, spatial, catalog, objstr=None, radius=None,
units='arcsec', size=None, polygon=None):
'''
Query the NASA/IPAC Infrared Science Archive (IRSA)
Required Arguments:
*spatial* [ 'Cone' | 'Box' | 'Polygon' ]
The type of query to execute
*catalog* [ string ]
One of the catalogs listed by ``atpy.irsa_service.list_catalogs()``
Optional Keyword Arguments:
*objstr* [ str ]
This string gives the position of the center of the cone or box if
performing a cone or box search. The string can give coordinates
in various coordinate systems, or the name of a source that will
be resolved on the server (see `here
<http://irsa.ipac.caltech.edu/search_help.html>`_ for more
details). Required if spatial is 'Cone' or 'Box'.
*radius* [ float ]
The radius for the cone search. Required if spatial is 'Cone'
*units* [ 'arcsec' | 'arcmin' | 'deg' ]
The units for the cone search radius. Defaults to 'arcsec'.
*size* [ float ]
The size of the box to search in arcseconds. Required if spatial
is 'Box'.
*polygon* [ list of tuples ]
        The list of (ra, dec) pairs, in decimal degrees, outlining the
polygon to search in. Required if spatial is 'Polygon'
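
    Example (a minimal sketch, assuming the usual ATpy Table dispatch;
    'fp_psc' is used here purely as an illustrative catalog name):

        >>> import atpy
        >>> t = atpy.Table('Cone', 'fp_psc', objstr='M31', radius=10.,
        ...                type='irsa')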
'''
base_url = 'http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query'
self.reset()
    # Normalize the capitalization of the spatial query type
spatial = spatial.capitalize()
# Set basic options
options = {}
options['spatial'] = spatial
options['catalog'] = catalog
options['outfmt'] = 3
if spatial == "Cone":
if not radius:
raise Exception("radius is required for Cone search")
options['radius'] = radius
if not units:
raise Exception("units is required for Cone search")
if units not in ['arcsec', 'arcmin', 'deg']:
raise Exception("units should be one of arcsec/arcmin/deg")
options['radunits'] = units
if not objstr:
raise Exception("objstr is required for Cone search")
options['objstr'] = objstr
elif spatial == "Box":
if not size:
raise Exception("size is required for Box search")
options['size'] = size
if not objstr:
raise Exception("objstr is required for Cone search")
options['objstr'] = objstr
elif spatial == "Polygon":
if not polygon:
raise Exception("polygon is required for Polygon search")
pairs = []
for pair in polygon:
if pair[1] > 0:
pairs.append(str(pair[0]) + '+' + str(pair[1]))
else:
pairs.append(str(pair[0]) + str(pair[1]))
        options['polygon'] = ','.join(pairs)
elif spatial == "None":
options['spatial'] = 'NONE'
else:
raise Exception("spatial should be one of cone/box/polygon/none")
# Construct query URL
url = base_url + "?" + \
string.join(["%s=%s" % (x, urllib.quote_plus(str(options[x]))) for x in options], "&")
# Request page
req = Request(url)
response = urlopen(req)
result = response.read()
# Check if results were returned
if 'The catalog is not on the list' in result:
raise Exception("Catalog not found")
# Check that object name was not malformed
if 'Either wrong or missing coordinate/object name' in result:
raise Exception("Malformed coordinate/object name")
# Check that the results are not of length zero
if len(result) == 0:
raise Exception("The IRSA server sent back an empty reply")
# Write table to temporary file
output = tempfile.NamedTemporaryFile()
output.write(result)
output.flush()
# Read it in using ATpy VO reader
self.read(output.name, type='vo', verbose=False)
# Set table name
self.table_name = "IRSA_query"
# Check if table is empty
if len(self) == 0:
warnings.warn("Query returned no results, so the table will be empty")
# Remove temporary file
output.close()
def list_catalogs():
url = 'http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-scan?mode=xml'
req = Request(url)
response = urlopen(req)
tree = ElementTree()
for catalog in tree.parse(response).findall('catalog'):
catname = catalog.find('catname').text
desc = catalog.find('desc').text
print("%30s %s" % (catname, desc)) | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/irsa_service.py | irsa_service.py |
from __future__ import print_function, division
import os
from .decorators import auto_download_to_file, auto_decompress_to_fileobj
# Thanks to Moritz Guenther for providing the initial code used to create this file
from astropy.io import ascii
def read_cds(self, filename, **kwargs):
'''
Read data from a CDS table (also called Machine Readable Tables) file
Required Arguments:
*filename*: [ string ]
The file to read the table from
Keyword Arguments are passed to astropy.io.ascii
'''
read_ascii(self, filename, Reader=ascii.Cds, **kwargs)
def read_daophot(self, filename, **kwargs):
'''
Read data from a DAOphot table
Required Arguments:
*filename*: [ string ]
The file to read the table from
Keyword Arguments are passed to astropy.io.ascii
'''
read_ascii(self, filename, Reader=ascii.Daophot, **kwargs)
def read_latex(self, filename, **kwargs):
'''
Read data from a Latex table
Required Arguments:
*filename*: [ string ]
The file to read the table from
Keyword Arguments are passed to astropy.io.ascii
'''
read_ascii(self, filename, Reader=ascii.Latex, **kwargs)
def write_latex(self, filename, **kwargs):
'''
Write data to a Latex table
Required Arguments:
*filename*: [ string ]
The file to write the table to
Keyword Arguments are passed to astropy.io.ascii
'''
write_ascii(self, filename, Writer=ascii.Latex, **kwargs)
def read_rdb(self, filename, **kwargs):
'''
Read data from an RDB table
Required Arguments:
*filename*: [ string ]
The file to read the table from
Keyword Arguments are passed to astropy.io.ascii
'''
read_ascii(self, filename, Reader=ascii.Rdb, **kwargs)
def write_rdb(self, filename, **kwargs):
'''
Write data to an RDB table
Required Arguments:
*filename*: [ string ]
The file to write the table to
Keyword Arguments are passed to astropy.io.ascii
'''
write_ascii(self, filename, Writer=ascii.Rdb, **kwargs)
# astropy.io.ascii can handle file objects
@auto_download_to_file
@auto_decompress_to_fileobj
def read_ascii(self, filename, **kwargs):
'''
Read a table from an ASCII file using astropy.io.ascii
Optional Keyword Arguments:
Reader - Reader class (default= BasicReader )
Inputter - Inputter class
delimiter - column delimiter string
comment - regular expression defining a comment line in table
quotechar - one-character string to quote fields containing special characters
header_start - line index for the header line not counting comment lines
data_start - line index for the start of data not counting comment lines
data_end - line index for the end of data (can be negative to count from end)
converters - dict of converters
data_Splitter - Splitter class to split data columns
header_Splitter - Splitter class to split header columns
names - list of names corresponding to each data column
include_names - list of names to include in output (default=None selects all names)
exclude_names - list of names to exlude from output (applied after include_names)
Note that the Outputter argument is not passed to astropy.io.ascii.
See the astropy.io.ascii documentation at http://docs.astropy.org/en/latest/io/ascii/index.html for more details.
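
    Example (a minimal sketch, assuming the usual ATpy Table dispatch;
    'data.txt' is a hypothetical whitespace-delimited file with a header row):

        >>> import atpy
        >>> t = atpy.Table('data.txt', type='ascii')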
'''
self.reset()
if 'Outputter' in kwargs:
kwargs.pop('Outputter')
table = ascii.read(filename, **kwargs)
for name in table.colnames:
self.add_column(name, table[name])
def write_ascii(self, filename, **kwargs):
'''
Read a table from an ASCII file using astropy.io.ascii
Optional Keyword Arguments:
Writer - Writer class (default= Basic)
delimiter - column delimiter string
write_comment - string defining a comment line in table
quotechar - one-character string to quote fields containing special characters
formats - dict of format specifiers or formatting functions
names - list of names corresponding to each data column
include_names - list of names to include in output (default=None selects all names)
    exclude_names - list of names to exclude from output (applied after include_names)
See the astropy.io.ascii documentation at http://docs.astropy.org/en/latest/io/ascii/index.html for more details.
'''
if 'overwrite' in kwargs:
overwrite = kwargs.pop('overwrite')
else:
overwrite = False
if type(filename) is str and os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
ascii.write(self.data, filename, **kwargs) | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/asciitables.py | asciitables.py |
_readers = {}
_writers = {}
_set_readers = {}
_set_writers = {}
_extensions = {}
def register_reader(ttype, function, override=False):
'''
Register a table reader function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when reading.
*function*: [ function ]
The function to read in a single table.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
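
    Example (a sketch; ``read_myformat`` is a hypothetical reader function
    with the same calling convention as the built-in readers):

        >>> register_reader('myformat', read_myformat)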
'''
if not ttype in _readers or override:
_readers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_writer(ttype, function, override=False):
'''
Register a table writer function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when writing.
*function*: [ function ]
The function to write out a single table.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _writers or override:
_writers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_set_reader(ttype, function, override=False):
'''
Register a table set reader function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when reading.
*function*: [ function ]
The function to read in a table set.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _set_readers or override:
_set_readers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_set_writer(ttype, function, override=False):
'''
Register a table set writer function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when writing.
*function*: [ function ]
The function to write out a table set.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _set_writers or override:
_set_writers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_extensions(ttype, extensions, override=False):
'''
Associate file extensions with a specific table type
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that is used to
specify the table type when reading.
*extensions*: [ string or list or tuple ]
List of valid extensions for the table type - used for auto type
selection. All extensions should be given in lowercase as file
extensions are converted to lowercase before checking against this
list. If a single extension is given, it can be specified as a
string rather than a list of strings
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any extensions if already present.
'''
if type(extensions) == str:
extensions = [extensions]
for extension in extensions:
if not extension in _extensions or override:
_extensions[extension] = ttype
else:
raise Exception("Extension %s is already defined" % extension)
def _determine_type(string, verbose):
if not isinstance(string, basestring):
raise Exception('Could not determine table type (non-string argument)')
s = str(string).lower()
if not '.' in s:
extension = s
else:
extension = s.split('.')[-1]
if extension.lower() in ['gz', 'bz2', 'bzip2']:
extension = s.split('.')[-2]
if extension in _extensions:
table_type = _extensions[extension]
if verbose:
print("Auto-detected table type: %s" % table_type)
else:
raise Exception('Could not determine table type for extension %s' % extension)
return table_type
from . import fitstable
register_reader('fits', fitstable.read)
register_writer('fits', fitstable.write)
register_set_reader('fits', fitstable.read_set)
register_set_writer('fits', fitstable.write_set)
register_extensions('fits', ['fit', 'fits'])
from . import votable
register_reader('vo', votable.read)
register_writer('vo', votable.write)
register_set_reader('vo', votable.read_set)
register_set_writer('vo', votable.write_set)
register_extensions('vo', ['xml', 'vot'])
from . import ipactable
register_reader('ipac', ipactable.read)
register_writer('ipac', ipactable.write)
register_extensions('ipac', ['ipac', 'tbl'])
from . import sqltable
register_reader('sql', sqltable.read)
register_writer('sql', sqltable.write)
register_set_reader('sql', sqltable.read_set)
register_set_writer('sql', sqltable.write_set)
register_extensions('sql', ['sqlite', 'postgres', 'mysql', 'db'])
from . import asciitables
register_reader('cds', asciitables.read_cds)
register_reader('mrt', asciitables.read_cds)
register_reader('latex', asciitables.read_latex)
register_writer('latex', asciitables.write_latex)
register_reader('rdb', asciitables.read_rdb)
register_writer('rdb', asciitables.write_rdb)
register_extensions('rdb', ['rdb'])
register_reader('daophot', asciitables.read_daophot)
register_reader('ascii', asciitables.read_ascii)
register_writer('ascii', asciitables.write_ascii)
from . import hdf5table
register_reader('hdf5', hdf5table.read)
register_set_reader('hdf5', hdf5table.read_set)
register_writer('hdf5', hdf5table.write)
register_set_writer('hdf5', hdf5table.write_set)
register_extensions('hdf5', ['hdf5', 'h5'])
from . import irsa_service
register_reader('irsa', irsa_service.read)
from . import vo_conesearch
register_reader('vo_conesearch', vo_conesearch.read)
from . import htmltable
register_writer('html', htmltable.write)
register_extensions('html', ['html', 'htm']) | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/registry.py | registry.py |
from __future__ import print_function, division
# NOTE: docstring is long and so only written once!
# It is copied for the other routines
import warnings
import numpy as np
from . import sqlhelper as sql
from .exceptions import TableException, ExistingTableException
invalid = {}
invalid[np.uint8] = np.iinfo(np.uint8).max
invalid[np.uint16] = np.iinfo(np.uint16).max
invalid[np.uint32] = np.iinfo(np.uint32).max
invalid[np.uint64] = np.iinfo(np.int64).max
invalid[np.int8] = np.iinfo(np.int8).max
invalid[np.int16] = np.iinfo(np.int16).max
invalid[np.int32] = np.iinfo(np.int32).max
invalid[np.int64] = np.iinfo(np.int64).max
invalid[np.float32] = np.float32(np.nan)
invalid[np.float64] = np.float64(np.nan)
def read(self, dbtype, *args, **kwargs):
'''
Required Arguments:
*dbtype*: [ 'sqlite' | 'mysql' | 'postgres' ]
The SQL database type
Optional arguments (only for Table.read() class):
*table*: [ string ]
The name of the table to read from the database (this is only
required if there are more than one table in the database). This
is not required if the query= argument is specified, except if
using an SQLite database.
*query*: [ string ]
An arbitrary SQL query to construct a table from. This can be
any valid SQL command provided that the result is a single
table.
The remaining arguments depend on the database type:
* SQLite:
Arguments are passed to sqlite3.connect(). For a full list of
available arguments, see the help page for sqlite3.connect(). The
main arguments are listed below.
Required arguments:
*dbname*: [ string ]
The name of the database file
* MySQL:
Arguments are passed to MySQLdb.connect(). For a full list of
available arguments, see the documentation for MySQLdb. The main
arguments are listed below.
Optional arguments:
*host*: [ string ]
The host to connect to (default is localhost)
*user*: [ string ]
            The user to connect as (default is current user)
*passwd*: [ string ]
The user password (default is blank)
*db*: [ string ]
The name of the database to connect to (no default)
*port* [ integer ]
The port to connect to (default is 3306)
* PostGreSQL:
Arguments are passed to pgdb.connect(). For a full list of
available arguments, see the help page for pgdb.connect(). The
main arguments are listed below.
*host*: [ string ]
The host to connect to (default is localhost)
*user*: [ string ]
            The user to connect as (default is current user)
*password*: [ string ]
The user password (default is blank)
*database*: [ string ]
The name of the database to connect to (no default)
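
    Example (a minimal sketch, assuming the usual ATpy Table dispatch;
    'mydata.db' and 'observations' are hypothetical names):

        >>> import atpy
        >>> t = atpy.Table('sqlite', 'mydata.db', table='observations',
        ...                type='sql')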
'''
if 'table' in kwargs:
table = kwargs.pop('table')
else:
table = None
if 'verbose' in kwargs:
verbose = kwargs.pop('verbose')
else:
verbose = True
if 'query' in kwargs:
query = kwargs.pop('query')
else:
query = None
# Erase existing content
self.reset()
connection, cursor = sql.connect_database(dbtype, *args, **kwargs)
# If no table is requested, check that there is only one table
table_names = sql.list_tables(cursor, dbtype)
if len(table_names) == 0:
raise Exception("No table in selected database")
if not query or dbtype == 'sqlite':
if table==None:
if len(table_names) == 1:
                table_name = list(table_names.keys())[0]
else:
raise TableException(table_names, 'table')
else:
table_name = table_names[table]
# Find overall names and types for the table
column_names, column_types, primary_keys = sql.column_info(cursor, dbtype, \
str(table_name))
self.table_name = table_name
else:
column_names = []
column_types = []
primary_keys = []
self.table_name = "sql_query"
if query:
# Execute the query
cursor.execute(query)
if dbtype == 'sqlite':
column_types_dict = dict(zip(column_names, column_types))
else:
column_types_dict = None
# Override column names and types
column_names, column_types = sql.column_info_desc(dbtype, cursor.description, column_types_dict)
else:
cursor = connection.cursor()
cursor.execute('select * from ' + table_name)
results = cursor.fetchall()
if results:
results = np.rec.fromrecords(list(results), \
names = column_names)
else:
raise Exception("SQL query did not return any records")
for i, column in enumerate(results.dtype.names):
if self._masked:
if results[column].dtype.type == np.object_:
mask = np.equal(results[column], None)
if column_types[i] == np.str:
results[column][mask] = "NULL"
else:
results[column][mask] = 0.
mask = mask.astype(np.object_)
else:
mask = None
self.add_column(column, results[column], dtype=column_types[i], mask=mask)
else:
if column_types[i] in invalid:
null = invalid[column_types[i]]
results[column][np.equal(np.array(results[column], dtype=np.object), None)] = null
else:
null = 'None'
self.add_column(column, results[column], dtype=column_types[i], null=null)
# Set primary key if present
if len(primary_keys) == 1:
self.set_primary_key(primary_keys[0])
elif len(primary_keys) > 1:
warnings.warn("ATpy does not yet support multiple primary keys in a single table - ignoring primary key information")
def write(self, dbtype, *args, **kwargs):
self._raise_vector_columns()
# Check if table overwrite is requested
if 'overwrite' in kwargs:
overwrite = kwargs.pop('overwrite')
else:
overwrite = False
# Open the connection
connection, cursor = sql.connect_database(dbtype, *args, **kwargs)
# Check that table name is set
if not self.table_name:
raise Exception("Table name is not set")
else:
table_name = str(self.table_name)
# Check that table name is ok
# todo
# lowercase because pgsql automatically converts
# table names to lower case
# Check if table already exists
existing_tables = sql.list_tables(cursor, dbtype).values()
if table_name in existing_tables or \
table_name.lower() in existing_tables:
if overwrite:
sql.drop_table(cursor, table_name)
else:
raise ExistingTableException()
# Create table
columns = [(name, self.columns[name].dtype.type) \
for name in self.names]
sql.create_table(cursor, dbtype, table_name, columns, primary_key=self._primary_key)
# Insert row
float_column = [self.columns[name].dtype.type in [np.float32, np.float64] for name in self.names]
for i in range(self.__len__()):
row = self.row(i, python_types=True)
sql.insert_row(cursor, dbtype, table_name, row, fixnan=not self._masked)
# Close connection
connection.commit()
cursor.close()
write.__doc__ = read.__doc__
def read_set(self, dbtype, *args, **kwargs):
self.reset()
connection, cursor = sql.connect_database(dbtype, *args, **kwargs)
table_names = sql.list_tables(cursor, dbtype)
cursor.close()
from .basetable import Table
for table in table_names:
kwargs['table'] = table
table = Table()
read(table, dbtype, *args, **kwargs)
self.append(table)
read_set.__doc__ = read.__doc__
def write_set(self, dbtype, *args, **kwargs):
for table_key in self.tables:
write(self.tables[table_key], dbtype, *args, **kwargs)
write_set.__doc__ = write.__doc__ | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/sqltable.py | sqltable.py |
from __future__ import print_function, division
import os
import numpy as np
from astropy.io import fits
from .exceptions import TableException
from .helpers import smart_dtype, smart_mask
from .decorators import auto_download_to_file, auto_fileobj_to_file
standard_keys = ['XTENSION', 'NAXIS', 'NAXIS1', 'NAXIS2', 'TFIELDS', \
'PCOUNT', 'GCOUNT', 'BITPIX', 'EXTNAME']
# Define type conversion dictionary
type_dict = {}
type_dict[np.bool_] = "L"
type_dict[np.int8] = "B"
type_dict[np.uint8] = "B"
type_dict[np.int16] = "I"
type_dict[np.uint16] = "I"
type_dict[np.int32] = "J"
type_dict[np.uint32] = "J"
type_dict[np.int64] = "K"
type_dict[np.uint64] = "K"
type_dict[np.float32] = "E"
type_dict[np.float64] = "D"
type_dict[np.str] = "A"
type_dict[np.string_] = "A"
type_dict[str] = "A"
def _list_tables(filename):
hdulist = fits.open(filename)
tables = {}
for i, hdu in enumerate(hdulist[1:]):
if hdu.header['XTENSION'] in ['BINTABLE', 'ASCIITABLE', 'TABLE']:
tables[i + 1] = hdu.name
hdulist.close()
return tables
# PyFITS can handle compression, so no decompression detection
@auto_download_to_file
@auto_fileobj_to_file
def read(self, filename, hdu=None, memmap=False, verbose=True):
'''
Read a table from a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to read the table from
Optional Keyword Arguments:
*hdu*: [ integer ]
The HDU to read from the FITS file (this is only required
if there is more than one table in the FITS file)
*memmap*: [ bool ]
Whether PyFITS should use memory mapping
'''
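# Example usage (a minimal sketch; 'catalog.fits' is a hypothetical file name,
# and this assumes the reader is reached through atpy.Table, which dispatches
# here based on the file extension or an explicit type='fits'):
#
#     >>> import atpy
#     >>> t = atpy.Table('catalog.fits', hdu=2)
#     >>> t.describe()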
self.reset()
# If no hdu is requested, check that there is only one table
if not hdu:
tables = _list_tables(filename)
if len(tables) == 0:
raise Exception("No tables in file")
elif len(tables) == 1:
hdu = list(tables.keys())[0]
else:
raise TableException(tables, 'hdu')
hdulist = fits.open(filename, memmap=memmap)
hdu = hdulist[hdu]
table = hdu.data
header = hdu.header
columns = hdu.columns
# Construct dtype for table
dtype = []
for i in range(len(hdu.data.dtype)):
name = hdu.data.dtype.names[i]
type = hdu.data.dtype[name]
if type.subdtype:
type, shape = type.subdtype
else:
shape = ()
# Get actual FITS format and zero-point
format, bzero = hdu.columns[i].format, hdu.columns[i].bzero
# Remove numbers from format, to find just type
format = format.strip("1234567890.")
if type.type is np.string_ and format in ['I', 'F', 'E', 'D']:
if format == 'I':
type = np.int64
elif format in ['F', 'E']:
type = np.float32
elif format == 'D':
type = np.float64
if format == 'X' and type.type == np.uint8:
type = np.bool
if len(shape) == 1:
shape = (shape[0] * 8,)
if format == 'L':
type = np.bool
if bzero and format in ['B', 'I', 'J']:
if format == 'B' and bzero == -128:
dtype.append((name, np.int8, shape))
elif format == 'I' and bzero == - np.iinfo(np.int16).min:
dtype.append((name, np.uint16, shape))
elif format == 'J' and bzero == - np.iinfo(np.int32).min:
dtype.append((name, np.uint32, shape))
else:
dtype.append((name, type, shape))
else:
dtype.append((name, type, shape))
dtype = np.dtype(dtype)
if self._masked:
self._setup_table(len(hdu.data), dtype, units=columns.units)
else:
self._setup_table(len(hdu.data), dtype, units=columns.units, \
nulls=columns.nulls)
# Populate the table
for i, name in enumerate(columns.names):
format, bzero = hdu.columns[i].format[-1], hdu.columns[i].bzero
if bzero and format in ['B', 'I', 'J']:
data = np.rec.recarray.field(hdu.data, i)
if format == 'B' and bzero == -128:
data = (data.astype(np.int16) + bzero).astype(np.int8)
elif format == 'I' and bzero == - np.iinfo(np.int16).min:
data = (data.astype(np.int32) + bzero).astype(np.uint16)
elif format == 'J' and bzero == - np.iinfo(np.int32).min:
data = (data.astype(np.int64) + bzero).astype(np.uint32)
else:
data = table.field(name)
else:
data = table.field(name)
self.data[name][:] = data[:]
if self._masked:
if columns.nulls[i] == 'NAN.0':
null = np.nan
elif columns.nulls[i] == 'INF.0':
null = np.inf
else:
null = columns.nulls[i]
self.data[name].mask = smart_mask(data, null)
self.data[name].set_fill_value(null)
for key in header.keys():
if not key[:4] in ['TFOR', 'TDIS', 'TDIM', 'TTYP', 'TUNI'] and \
not key in standard_keys:
self.add_keyword(key, header[key])
try:
header['COMMENT']
except KeyError:
pass
else:
# PyFITS used to define header['COMMENT'] as the last comment read in
# (which was a string), but now defines it as a _HeaderCommentaryCards
# object
if isinstance(header['COMMENT'], basestring):
for comment in header.get_comment():
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
else:
for comment in header['COMMENT']:
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
if hdu.name:
self.table_name = str(hdu.name)
hdulist.close()
return
def _to_hdu(self):
'''
Return the current table as a astropy.io.fits HDU object
'''
columns = []
for name in self.names:
if self._masked:
data = self.data[name].filled()
null = self.data[name].fill_value
if data.ndim > 1:
null = null[0]
if type(null) in [np.bool_, np.bool]:
null = bool(null)
else:
data = self.data[name]
null = self.columns[name].null
unit = self.columns[name].unit
dtype = self.columns[name].dtype
elemwidth = None
if unit == None:
unit = ''
if data.ndim > 1:
elemwidth = str(data.shape[1])
column_type = smart_dtype(dtype)
if column_type == np.string_:
elemwidth = dtype.itemsize
if column_type in type_dict:
if elemwidth:
format = str(elemwidth) + type_dict[column_type]
else:
format = type_dict[column_type]
else:
raise Exception("cannot use numpy type " + str(column_type))
if column_type == np.uint16:
bzero = - np.iinfo(np.int16).min
elif column_type == np.uint32:
bzero = - np.iinfo(np.int32).min
elif column_type == np.uint64:
raise Exception("uint64 unsupported")
elif column_type == np.int8:
bzero = -128
else:
bzero = None
columns.append(fits.Column(name=name, format=format, unit=unit, \
null=null, array=data, bzero=bzero))
hdu = fits.new_table(fits.ColDefs(columns))
hdu.name = self.table_name
for key in self.keywords:
if len(key) > 8:
keyname = "hierarch " + key
else:
keyname = key
try: # PyFITS 3.x
hdu.header[keyname] = self.keywords[key]
except KeyError: # PyFITS 2.x
hdu.header.update(keyname, self.keywords[key])
for comment in self.comments:
hdu.header.add_comment(comment)
return hdu
def write(self, filename, overwrite=False):
'''
Write the table to a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to write the table to
Optional Keyword Arguments:
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
'''
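# Example usage (a sketch; 'catalog.fits' is a hypothetical file name, and the
# table t is assumed to have been populated already):
#
#     >>> t.write('catalog.fits', overwrite=True)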
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
try:
_to_hdu(self).writeto(filename)
except:
_to_hdu(self).writeto(filename, output_verify='silentfix')
# PyFITS can handle compression, so no decompression detection
@auto_download_to_file
@auto_fileobj_to_file
def read_set(self, filename, memmap=False, verbose=True):
'''
Read all tables from a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to read the tables from
Optional Keyword Arguments:
*memmap*: [ bool ]
Whether PyFITS should use memory mapping
'''
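# Example usage (a sketch; 'many_tables.fits' is hypothetical, and this assumes
# atpy.TableSet dispatches to this reader in the same way atpy.Table does):
#
#     >>> ts = atpy.TableSet('many_tables.fits')
#     >>> for key in ts.tables:
#     ...     print(ts.tables[key])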
self.reset()
# Read in primary header
header = fits.getheader(filename, 0)
for key in header.keys():
if not key[:4] in ['TFOR', 'TDIS', 'TDIM', 'TTYP', 'TUNI'] and \
not key in standard_keys:
self.add_keyword(key, header[key])
try:
header['COMMENT']
except KeyError:
pass
else:
# PyFITS used to define header['COMMENT'] as the last comment read in
# (which was a string), but now defines it as a _HeaderCommentaryCards
# object
if isinstance(header['COMMENT'], basestring):
for comment in header.get_comment():
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
else:
for comment in header['COMMENT']:
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
# Read in tables one by one
from .basetable import Table
for hdu in _list_tables(filename):
table = Table()
read(table, filename, hdu=hdu, memmap=memmap, verbose=verbose)
self.append(table)
def write_set(self, filename, overwrite=False):
'''
Write the tables to a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to write the tables to
Optional Keyword Arguments:
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
'''
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
primary = fits.PrimaryHDU()
for key in self.keywords:
if len(key) > 8:
keyname = "hierarch " + key
else:
keyname = key
try: # PyFITS 3.x
primary.header[keyname] = self.keywords[key]
except KeyError: # PyFITS 2.x
primary.header.update(keyname, self.keywords[key])
for comment in self.comments:
primary.header.add_comment(comment)
hdulist = [primary]
for table_key in self.tables:
hdulist.append(_to_hdu(self.tables[table_key]))
hdulist = fits.HDUList(hdulist)
hdulist.writeto(filename) | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/fitstable.py | fitstable.py |
from __future__ import print_function
########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.4.0'
__all__ = ["decorator", "FunctionMaker", "contextmanager"]
import sys, re, inspect
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
class getfullargspec(object):
"A quick and dirty replacement for getfullargspec for Python 2.X"
def __init__(self, f):
self.args, self.varargs, self.varkw, self.defaults = \
inspect.getargspec(f)
self.kwonlyargs = []
self.kwonlydefaults = None
def __iter__(self):
yield self.args
yield self.varargs
yield self.varkw
yield self.defaults
def get_init(cls):
return cls.__init__.__func__
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
if sys.version < '3': # easy way
self.shortsignature = self.signature = \
inspect.formatargspec(
formatvalue=lambda val: "", *argspec)[1:-1]
else: # Python 3 way
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
func.__annotations__ = getattr(self, 'annotations', None)
callermodule = sys._getframe(3).f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
try:
code = compile(src, '<string>', 'single')
# print >> sys.stderr, 'Compiling %s' % src
exec(code, evaldict)
except:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an attribute
__source__ is added to the result. The attributes attrs are added,
if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] #strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
return self.make('def %(name)s(%(signature)s):\n' + ibody,
evaldict, addsource, **attrs)
def decorator(caller, func=None):
"""
decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller.
"""
if func is not None: # returns a decorated function
evaldict = func.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
fun = getfullargspec(callerfunc).args[1] # second arg
elif inspect.isfunction(caller):
name = '_lambda_' if caller.__name__ == '<lambda>' \
else caller.__name__
callerfunc = caller
doc = caller.__doc__
fun = getfullargspec(callerfunc).args[0] # first arg
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.__func__
doc = caller.__call__.__doc__
fun = getfullargspec(callerfunc).args[1] # second arg
evaldict = callerfunc.__globals__.copy()
evaldict['_call_'] = caller
evaldict['decorator'] = decorator
return FunctionMaker.create(
'%s(%s)' % (name, fun),
'return decorator(_call_, %s)' % fun,
evaldict, undecorated=caller, __wrapped__=caller,
doc=doc, module=caller.__module__)
######################### contextmanager ########################
def __call__(self, func):
'Context manager decorator'
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
ContextManager = type(
'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager
def __init__(self, f, *a, **k):
return GeneratorContextManager.__init__(self, f(*a, **k))
ContextManager = type(
'ContextManager', (GeneratorContextManager,),
dict(__call__=__call__, __init__=__init__))
contextmanager = decorator(ContextManager) | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/decorator.py | decorator.py |
from __future__ import print_function, division
from distutils import version
import warnings
import tempfile
vo_minimum_version = version.LooseVersion('0.3')
try:
import vo.conesearch as vcone
vo_installed = True
except:
vo_installed = False
def _check_vo_installed():
if not vo_installed:
raise Exception("Cannot query the VO - vo " + \
vo_minimum_version.vstring + " or later required")
def read(self, catalog=None, ra=None, dec=None, radius=None, verb=1,
pedantic=False, **kwargs):
'''
Query a VO catalog using the STScI vo module
This docstring has been adapted from the STScI vo conesearch module:
*catalog* [ None | string | VOSCatalog | list ]
May be one of the following, in order from easiest to use to most
control:
- None: A database of conesearch catalogs is downloaded from
STScI. The first catalog in the database to successfully return
a result is used.
- catalog name: A name in the database of conesearch catalogs at
STScI is used. For a list of acceptable names, see
vo_conesearch.list_catalogs().
- url: The prefix of a url to a IVOA Cone Search Service. Must end
in either ? or &.
- A VOSCatalog instance: A specific catalog manually downloaded
and selected from the database using the APIs in the
STScI vo.vos_catalog module.
- Any of the above 3 options combined in a list, in which case
they are tried in order.
*pedantic* [ bool ]
When pedantic is True, raise an error when the returned VOTable
file violates the spec, otherwise issue a warning.
*ra* [ float ]
A right-ascension in the ICRS coordinate system for the position
of the center of the cone to search, given in decimal degrees.
*dec* [ float ]
A declination in the ICRS coordinate system for the position of
the center of the cone to search, given in decimal degrees.
*radius* [ float ]
The radius of the cone to search, given in decimal degrees.
*verb* [ int ]
Verbosity, 1, 2, or 3, indicating how many columns are to be
returned in the resulting table. Support for this parameter by a
Cone Search service implementation is optional. If the service
supports the parameter, then when the value is 1, the response
should include the bare minimum of columns that the provider
considers useful in describing the returned objects. When the
value is 3, the service should return all of the columns that are
available for describing the objects. A value of 2 is intended for
requesting a medium number of columns between the minimum and
maximum (inclusive) that are considered by the provider to most
typically useful to the user. When the verb parameter is not
provided, the server should respond as if verb = 2. If the verb
parameter is not supported by the service, the service should
ignore the parameter and should always return the same columns for
every request.
Additional keyword arguments may be provided to pass along to the
server. These arguments are specific to the particular catalog being
queried.
'''
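# Example usage (a sketch; the position and radius are illustrative, and this
# assumes the cone-search reader is registered with ATpy under the type name
# 'vo_conesearch' - check atpy.registry if the name differs):
#
#     >>> t = atpy.Table()
#     >>> t.read(type='vo_conesearch', ra=10.68, dec=41.27, radius=0.05)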
_check_vo_installed()
self.reset()
# Perform the cone search
VOTable = vcone.conesearch(catalog_db=catalog, pedantic=pedantic,
ra=ra, dec=dec, sr=radius, verb=verb, **kwargs)
# Write table to temporary file
output = tempfile.NamedTemporaryFile()
VOTable._votable.to_xml(output)
output.flush()
# Read it in using ATpy VO reader
self.read(output.name, type='vo', verbose=False)
# Check if table is empty
if len(self) == 0:
warnings.warn("Query returned no results, so the table will be empty")
# Remove temporary file
output.close()
def list_catalogs():
_check_vo_installed()
for catalog in vcone.list_catalogs():
if "BROKEN" in catalog:
continue
print("%30s" % catalog) | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/vo_conesearch.py | vo_conesearch.py |
from __future__ import print_function, division
import os
import numpy as np
try:
asstr = np.compat.asstr
except AttributeError: # For Numpy 1.4.1
import sys
if sys.version_info[0] >= 3:
def asstr(s):
if isinstance(s, bytes):
return s.decode('latin1')
return str(s)
else:
asstr = str
from .exceptions import TableException
from .decorators import auto_download_to_file, auto_decompress_to_fileobj, auto_fileobj_to_file
try:
import h5py
h5py_installed = True
except:
h5py_installed = False
STRING_TYPES = [bytes, np.string_, str]
try:
STRING_TYPES.append(np.bytes_)
except AttributeError:
pass
try:
STRING_TYPES.append(unicode)
except NameError:
pass
def _check_h5py_installed():
if not h5py_installed:
raise Exception("Cannot read/write HDF5 files - h5py required")
def _get_group(filename, group="", append=False):
if append:
f = h5py.File(filename, 'a')
else:
f = h5py.File(filename, 'w')
if group:
if append:
if group in f.keys():
g = f[group]
else:
g = f.create_group(group)
else:
g = f.create_group(group)
else:
g = f
return f, g
def _create_required_groups(g, path):
'''
Given a file or group handle, and a path, make sure that the specified
path exists and create if necessary.
'''
for dirname in path.split('/'):
if not dirname in g:
g = g.create_group(dirname)
else:
g = g[dirname]
def _list_tables(file_handle):
list_of_names = []
file_handle.visit(list_of_names.append)
tables = {}
for item in list_of_names:
if isinstance(file_handle[item], h5py.highlevel.Dataset):
if file_handle[item].dtype.names:
tables[item] = item
return tables
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read(self, filename, table=None, verbose=True):
'''
Read a table from an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to read the table from
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to read the table from
Optional Keyword Arguments:
*table*: [ string ]
The name of the table to read from the HDF5 file (this is only
required if there is more than one table in the file)
'''
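# Example usage (a sketch; 'archive.hdf5' and the table name are hypothetical;
# pass type='hdf5' explicitly if the extension is not recognised by the
# registry):
#
#     >>> t = atpy.Table('archive.hdf5', table='observations')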
_check_h5py_installed()
self.reset()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
else:
if not os.path.exists(filename):
raise Exception("File not found: %s" % filename)
f = h5py.File(filename, 'r')
g = f['/']
# If no table is requested, check that there is only one table
if table is None:
tables = _list_tables(g)
if len(tables) == 1:
table = list(tables.keys())[0]
else:
raise TableException(tables, 'table')
# Set the table name
self.table_name = str(table)
self._setup_table(len(g[table]), g[table].dtype)
# Add columns to table
for name in g[table].dtype.names:
self.data[name][:] = g[table][name][:]
for attribute in g[table].attrs:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8
if type(g[table].attrs[attribute]) in STRING_TYPES:
self.add_keyword(attribute, asstr(g[table].attrs[attribute]))
else:
self.add_keyword(attribute, g[table].attrs[attribute])
if f is not None:
f.close()
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read_set(self, filename, pedantic=False, verbose=True):
'''
Read all tables from an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to read the tables from
'''
_check_h5py_installed()
self.reset()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
else:
if not os.path.exists(filename):
raise Exception("File not found: %s" % filename)
f = h5py.File(filename, 'r')
g = f['/']
for keyword in g.attrs:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8
if type(g.attrs[keyword]) in STRING_TYPES:
self.keywords[keyword] = asstr(g.attrs[keyword])
else:
self.keywords[keyword] = g.attrs[keyword]
from .basetable import Table
for table in _list_tables(g):
t = Table()
read(t, filename, table=table, verbose=verbose)
self.append(t)
if f is not None:
f.close()
def write(self, filename, compression=False, group="", append=False,
overwrite=False, ignore_groups=False):
'''
Write the table to an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to write the table to
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to write the table to
Optional Keyword Arguments:
*compression*: [ True | False ]
Whether to compress the table inside the HDF5 file
*group*: [ string ]
The group to write the table to inside the HDF5 file
*append*: [ True | False ]
Whether to append the table to an existing HDF5 file
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
*ignore_groups*: [ True | False ]
With this option set to True, groups are removed from table names.
With this option set to False, tables are placed in groups that
are present in the table name, and the groups are created if
necessary.
'''
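# Example usage (a sketch; the file and group names are hypothetical):
#
#     >>> t.write('archive.hdf5', group='catalogs/2mass', compression=True,
#     ...         overwrite=True)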
_check_h5py_installed()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
if group:
if group in g:
g = g[group]
else:
g = g.create_group(group)
else:
if os.path.exists(filename) and not append:
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
f, g = _get_group(filename, group=group, append=append)
if self.table_name:
name = self.table_name
else:
name = "Table"
if ignore_groups:
name = os.path.basename(name)
else:
path = os.path.dirname(name)
if path:
_create_required_groups(g, path)
if name in g.keys():
raise Exception("Table %s/%s already exists" % (group, name))
dset = g.create_dataset(name, data=self.data, compression=compression)
for keyword in self.keywords:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8. In addition, we have to use
# np.string_ to ensure that fixed-length attributes are used.
if isinstance(self.keywords[keyword], basestring):
dset.attrs[keyword] = np.string_(self.keywords[keyword])
else:
dset.attrs[keyword] = self.keywords[keyword]
if f is not None:
f.close()
def write_set(self, filename, compression=False, group="", append=False,
overwrite=False, ignore_groups=False, **kwargs):
'''
Write the tables to an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to write the tables to
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to write the tables to
Optional Keyword Arguments:
*compression*: [ True | False ]
Whether to compress the tables inside the HDF5 file
*group*: [ string ]
The group to write the table to inside the HDF5 file
*append*: [ True | False ]
Whether to append the tables to an existing HDF5 file
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
*ignore_groups*: [ True | False ]
With this option set to True, groups are removed from table names.
With this option set to False, tables are placed in groups that
are present in the table name, and the groups are created if
necessary.
'''
_check_h5py_installed()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
if group:
if group in g:
g = g[group]
else:
g = g.create_group(group)
else:
if os.path.exists(filename) and not append:
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
f, g = _get_group(filename, group=group, append=append)
for keyword in self.keywords:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8. In addition, we have to use
# np.string_ to ensure that fixed-length attributes are used.
if isinstance(self.keywords[keyword], basestring):
g.attrs[keyword] = np.string_(self.keywords[keyword])
else:
g.attrs[keyword] = self.keywords[keyword]
for i, table_key in enumerate(self.tables):
if self.tables[table_key].table_name:
name = self.tables[table_key].table_name
else:
name = "Table_%02i" % i
if ignore_groups:
name = os.path.basename(name)
else:
path = os.path.dirname(name)
if path:
_create_required_groups(g, path)
if name in g.keys():
raise Exception("Table %s/%s already exists" % (group, name))
dset = g.create_dataset(name, data=self.tables[table_key].data, compression=compression)
for keyword in self.tables[table_key].keywords:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8. In addition, we have to use
# np.string_ to ensure that fixed-length attributes are used.
if isinstance(self.tables[table_key].keywords[keyword], basestring):
dset.attrs[keyword] = np.string_(self.tables[table_key].keywords[keyword])
else:
dset.attrs[keyword] = self.tables[table_key].keywords[keyword]
if f is not None:
f.close() | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/hdf5table.py | hdf5table.py |
from __future__ import print_function, division
import os
import numpy as np
import warnings
from astropy.io.votable import parse
from astropy.io.votable.tree import VOTableFile, Resource, Field, Param
from astropy.io.votable.tree import Table as VOTable
from .exceptions import TableException
from .helpers import smart_dtype
from .decorators import auto_download_to_file, auto_decompress_to_fileobj, auto_fileobj_to_file
# Define type conversion dictionary
type_dict = {}
type_dict[np.bool_] = "boolean"
type_dict[np.uint8] = "unsignedByte"
type_dict[np.int16] = "short"
type_dict[np.int32] = "int"
type_dict[np.int64] = "long"
type_dict[np.float32] = "float"
type_dict[np.float64] = "double"
type_dict[np.str] = "char"
type_dict[np.string_] = "char"
type_dict[str] = "char"
def _list_tables(filename, pedantic=False):
votable = parse(filename, pedantic=pedantic)
tables = {}
for i, table in enumerate(votable.iter_tables()):
tables[i] = table.name
return tables
# VO can handle file objects, but because we need to read it twice we don't
# use that capability
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read(self, filename, pedantic=False, tid=-1, verbose=True):
'''
Read a table from a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to read the table from
Optional Keyword Arguments:
*tid*: [ integer ]
The ID of the table to read from the VO file (this is
only required if there is more than one table in the VO file)
*pedantic*: [ True | False ]
When *pedantic* is True, raise an error when the file violates
the VO Table specification, otherwise issue a warning.
'''
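# Example usage (a sketch; 'result.xml' is a hypothetical file name; tid= is
# only needed when the file holds more than one table, and type='vo' can be
# given explicitly if the extension is not recognised):
#
#     >>> t = atpy.Table('result.xml', tid=0)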
self.reset()
# If no table is requested, check that there is only one table
if tid==-1:
tables = _list_tables(filename, pedantic=pedantic)
if len(tables) == 1:
tid = 0
elif len(tables) == 0:
raise Exception("There are no tables present in this file")
else:
raise TableException(tables, 'tid')
votable = parse(filename, pedantic=pedantic)
for id, table in enumerate(votable.iter_tables()):
if id==tid:
break
if table.ID:
self.table_name = str(table.ID)
elif table.name:
self.table_name = str(table.name)
for field in table.fields:
colname = field.ID
if table.array.size:
data = table.array[colname]
else:
data = np.array([], dtype=field.converter.format)
if len(data) > 0 and data.ndim == 1 and not np.all([np.isscalar(x) for x in data]):
warnings.warn("VO Variable length vector column detected (%s) - converting to string" % colname)
data = np.array([str(x) for x in data])
if self._masked:
self.add_column(str(colname), np.array(data), \
unit=field.unit, mask=data.mask, \
description=field.description)
else:
self.add_column(str(colname), np.array(data),
unit=field.unit, description=field.description)
for param in table.params:
self.add_keyword(param.ID, param.value)
def _to_table(self, vo_table):
'''
Return the current table as a VOT object
'''
table = VOTable(vo_table)
# Add keywords
for key in self.keywords:
if isinstance(self.keywords[key], basestring):
arraysize = '*'
else:
arraysize = None
param = Param(table, name=key, ID=key, value=self.keywords[key], arraysize=arraysize)
table.params.append(param)
# Define some fields
n_rows = len(self)
fields = []
for i, name in enumerate(self.names):
data = self.data[name]
unit = self.columns[name].unit
description = self.columns[name].description
dtype = self.columns[name].dtype
column_type = smart_dtype(dtype)
if data.ndim > 1:
arraysize = str(data.shape[1])
else:
arraysize = None
if column_type in type_dict:
datatype = type_dict[column_type]
elif column_type == np.int8:
warnings.warn("int8 unsupported - converting to int16")
datatype = type_dict[np.int16]
elif column_type == np.uint16:
warnings.warn("uint16 unsupported - converting to int32")
datatype = type_dict[np.int32]
elif column_type == np.uint32:
warnings.warn("uint32 unsupported - converting to int64")
datatype = type_dict[np.int64]
elif column_type == np.uint64:
raise Exception("uint64 unsupported")
else:
raise Exception("cannot use numpy type " + str(column_type))
if column_type == np.float32:
precision = 'E9'
elif column_type == np.float64:
precision = 'E17'
else:
precision = None
if datatype == 'char':
if arraysize is None:
arraysize = '*'
else:
raise ValueError("Cannot write vector string columns to VO files")
field = Field(vo_table, ID=name, name=name, \
datatype=datatype, unit=unit, arraysize=arraysize, \
precision=precision)
field.description = description
fields.append(field)
table.fields.extend(fields)
table.create_arrays(n_rows)
# Character columns are stored as object columns in the vo_table
# instance. Leaving the type as string should work, but causes
# a segmentation fault on MacOS X with Python 2.6 64-bit so
# we force the conversion to object type columns.
for name in self.names:
dtype = self.columns[name].dtype
column_type = smart_dtype(dtype)
# Add data to the table
# At the moment, null values in VO table are dealt with via a
# 'mask' record array
if column_type == np.string_:
table.array[name] = self.data[name].astype(np.object_)
if self._masked:
table.array.mask[name] = self.data[name].mask.astype(np.object_)
else:
if self.data[name].dtype.type == np.bytes_ and type(self.columns[name].null) != bytes:
table.array.mask[name] = (self.data[name] == \
self.columns[name].null.encode('utf-8')).astype(np.object_)
else:
table.array.mask[name] = (self.data[name] == \
self.columns[name].null).astype(np.object_)
else:
table.array[name] = self.data[name]
if self._masked:
table.array.mask[name] = self.data[name].mask
else:
table.array.mask[name] = self.data[name] == \
self.columns[name].null
table.name = self.table_name
return table
def write(self, filename, votype='ascii', overwrite=False):
'''
Write the table to a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to write the table to
Optional Keyword Arguments:
*votype*: [ 'ascii' | 'binary' ]
Whether to write the table as ASCII or binary
'''
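# Example usage (a sketch; 'result.xml' is hypothetical):
#
#     >>> t.write('result.xml', votype='binary', overwrite=True)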
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
vo_table = VOTableFile()
resource = Resource()
vo_table.resources.append(resource)
resource.tables.append(_to_table(self, vo_table))
if votype == 'binary':
vo_table.get_first_table().format = 'binary'
vo_table.set_all_tables_format('binary')
vo_table.to_xml(filename)
# VO can handle file objects, but because we need to read it twice we don't
# use that capability
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read_set(self, filename, pedantic=False, verbose=True):
'''
Read all tables from a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to read the tables from
Optional Keyword Arguments:
*pedantic*: [ True | False ]
When *pedantic* is True, raise an error when the file violates
the VO Table specification, otherwise issue a warning.
'''
self.reset()
from .basetable import Table
for tid in _list_tables(filename, pedantic=pedantic):
t = Table()
read(t, filename, tid=tid, verbose=verbose, pedantic=pedantic)
self.append(t)
def write_set(self, filename, votype='ascii', overwrite=False):
'''
Write all tables to a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to write the tables to
Optional Keyword Arguments:
*votype*: [ 'ascii' | 'binary' ]
Whether to write the tables as ASCII or binary tables
'''
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
vo_table = VOTableFile()
resource = Resource()
vo_table.resources.append(resource)
for table_key in self.tables:
resource.tables.append(_to_table(self.tables[table_key], vo_table))
if votype == 'binary':
vo_table.get_first_table().format = 'binary'
vo_table.set_all_tables_format('binary')
vo_table.to_xml(filename) | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/votable.py | votable.py |
from __future__ import print_function, division
# Need to deprecate fits_read, etc.
import string
import warnings
from copy import deepcopy
import numpy as np
import numpy.ma as ma
from .exceptions import VectorException
from .structhelper import append_field, drop_fields
from .odict import odict
from . import registry
from .masked import __masked__
default_format = {}
default_format[None.__class__] = '16.9e'
default_format[np.bool_] = '1i'
default_format[np.int8] = '3i'
default_format[np.uint8] = '3i'
default_format[np.int16] = '5i'
default_format[np.uint16] = '5i'
default_format[np.int32] = '12i'
default_format[np.uint32] = '12i'
default_format[np.int64] = '22i'
default_format[np.uint64] = '23i'
default_format[np.float32] = '16.8e'
default_format[np.float64] = '25.17e'
default_format[np.str] = 's'
default_format[np.string_] = 's'
default_format[str] = 's'
default_format[np.unicode_] = 's'
class ColumnHeader(object):
def __init__(self, dtype, unit=None, description=None, null=None, format=None):
self.__dict__['dtype'] = dtype
self.unit = unit
self.description = description
self.__dict__['null'] = null
self.format = format
def __setattr__(self, attribute, value):
if attribute in ['unit', 'description', 'format']:
self.__dict__[attribute] = value
elif attribute in ['null', 'dtype']:
raise Exception("Cannot change %s through the columns object" % attribute)
else:
raise AttributeError(attribute)
def __repr__(self):
s = "type=%s" % str(self.dtype)
if self.unit:
s += ", unit=%s" % str(self.unit)
if self.null:
s += ", null=%s" % str(self.null)
if self.description:
s += ", description=%s" % self.description
return s
def __eq__(self, other):
if self.dtype != other.dtype:
return False
if self.unit != other.unit:
return False
if self.description != other.description:
return False
if self.null != other.null:
if np.isnan(self.null):
if not np.isnan(other.null):
return False
else:
return False
if self.format != other.format:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class Table(object):
def fits_read(self, *args, **kwargs):
warnings.warn("WARNING: fits_read is deprecated; use read instead")
kwargs['type'] = 'fits'
self.read(*args, **kwargs)
def vo_read(self, *args, **kwargs):
warnings.warn("WARNING: vo_read is deprecated; use read instead")
kwargs['type'] = 'vo'
self.read(*args, **kwargs)
def sql_read(self, *args, **kwargs):
warnings.warn("WARNING: sql_read is deprecated; use read instead")
kwargs['type'] = 'sql'
self.read(*args, **kwargs)
def ipac_read(self, *args, **kwargs):
warnings.warn("WARNING: ipac_read is deprecated; use read instead")
kwargs['type'] = 'ipac'
self.read(*args, **kwargs)
def fits_write(self, *args, **kwargs):
warnings.warn("WARNING: fits_write is deprecated; use write instead")
kwargs['type'] = 'fits'
self.write(*args, **kwargs)
def vo_write(self, *args, **kwargs):
warnings.warn("WARNING: vo_write is deprecated; use write instead")
kwargs['type'] = 'vo'
self.write(*args, **kwargs)
def sql_write(self, *args, **kwargs):
warnings.warn("WARNING: sql_write is deprecated; use write instead")
kwargs['type'] = 'sql'
self.write(*args, **kwargs)
def ipac_write(self, *args, **kwargs):
warnings.warn("WARNING: ipac_write is deprecated; use write instead")
kwargs['type'] = 'ipac'
self.write(*args, **kwargs)
def __repr__(self):
s = "<Table name='%s' rows=%i fields=%i>" % (self.table_name, self.__len__(), len(self.columns))
return s
def __init__(self, *args, **kwargs):
'''
Create a table instance
Optional Arguments:
If no arguments are given, an empty table is created
If one or more arguments are given they are passed to the
Table.read() method.
Optional Keyword Arguments (independent of table type):
*name*: [ string ]
The table name
*masked*: [ True | False ]
Whether to use masked arrays. WARNING: this feature is
experimental and will only work correctly with the svn version
of numpy post-revision 8025. Note that this overrides the
default set by atpy.set_masked_default.
'''
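# Example usage (a sketch; the column names and values are illustrative):
#
#     >>> import atpy
#     >>> import numpy as np
#     >>> t = atpy.Table(name='sources')
#     >>> t.add_column('id', np.arange(3))
#     >>> t.add_column('flux', np.array([1.2, 3.4, 5.6]), unit='mJy')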
self.reset()
if 'name' in kwargs:
self.table_name = kwargs.pop('name')
else:
self.table_name = None
if 'masked' in kwargs:
self._masked = kwargs.pop('masked')
else:
self._masked = __masked__
if len(args) + len(kwargs) > 0:
self.read(*args, **kwargs)
return
def read(self, *args, **kwargs):
'''
Read in a table from a file/database.
Optional Keyword Arguments (independent of table type):
*verbose*: [ True | False ]
Whether to print out warnings when reading (default is True)
*type*: [ string ]
The read method attempts to automatically guess the
file/database format based on the arguments supplied. The type
can be overridden by setting this argument.
'''
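# Example usage (a sketch; 'photometry.tbl' is hypothetical and the type=
# value must correspond to a registered reader):
#
#     >>> t = atpy.Table()
#     >>> t.read('photometry.tbl', type='ipac', verbose=False)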
if 'verbose' in kwargs:
verbose = kwargs['verbose']
else:
verbose = True
if 'type' in kwargs:
table_type = kwargs.pop('type').lower()
elif isinstance(args[0], basestring):
table_type = registry._determine_type(args[0], verbose)
else:
raise Exception('Could not determine table type')
original_filters = warnings.filters[:]
if verbose:
warnings.simplefilter("always")
else:
warnings.simplefilter("ignore")
try:
if table_type in registry._readers:
registry._readers[table_type](self, *args, **kwargs)
else:
raise Exception("Unknown table type: " + table_type)
finally:
warnings.filters = original_filters
return
def write(self, *args, **kwargs):
'''
Write out a table to a file/database.
Optional Keyword Arguments (independent of table type):
*verbose*: [ True | False ]
Whether to print out warnings when writing (default is True)
*type*: [ string ]
The read method attempts to automatically guess the
file/database format based on the arguments supplied. The type
can be overridden by setting this argument.
'''
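# Example usage (a sketch; 'photometry.fits' is hypothetical; keyword arguments
# such as overwrite= are passed through to the registered writer):
#
#     >>> t.write('photometry.fits', overwrite=True)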
if 'verbose' in kwargs:
verbose = kwargs.pop('verbose')
else:
verbose = True
if 'type' in kwargs:
table_type = kwargs.pop('type').lower()
elif type(args[0]) == str:
table_type = registry._determine_type(args[0], verbose)
else:
raise Exception('Could not determine table type')
original_filters = warnings.filters[:]
if verbose:
warnings.simplefilter("always")
else:
warnings.simplefilter("ignore")
try:
if table_type in registry._writers:
registry._writers[table_type](self, *args, **kwargs)
else:
raise Exception("Unknown table type: " + table_type)
finally:
warnings.filters = original_filters
return
def __getattr__(self, attribute):
if attribute == 'names':
return self.__dict__['data'].dtype.names
elif attribute == 'units':
print("WARNING: Table.units is deprecated - use Table.columns to access this information")
return dict([(name, self.columns[name].unit) for name in self.names])
elif attribute == 'types':
print("WARNING: Table.types is deprecated - use Table.columns to access this information")
return dict([(name, self.columns[name].type) for name in self.names])
elif attribute == 'nulls':
print("WARNING: Table.nulls is deprecated - use Table.columns to access this information")
return dict([(name, self.columns[name].null) for name in self.names])
elif attribute == 'formats':
print("WARNING: Table.formats is deprecated - use Table.columns to access this information")
return dict([(name, self.columns[name].format) for name in self.names])
elif attribute == 'shape':
return (len(self.data), len(self.names))
else:
try:
return self.__dict__['data'][attribute]
except:
raise AttributeError(attribute)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, item, value):
if 'data' in self.__dict__:
if isinstance(self.data, np.ndarray):
if item in self.data.dtype.names:
self.data[item] = value
return
raise ValueError("Column %s does not exist" % item)
def keys(self):
return self.data.dtype.names
def append(self, table):
for colname in self.columns:
if self.columns.keys() != table.columns.keys():
raise Exception("Column names do not match")
if self.columns[colname].dtype.type != table.columns[colname].dtype.type:
raise Exception("Column types do not match")
self.data = np.hstack((self.data, table.data))
def __setattr__(self, attribute, value):
if 'data' in self.__dict__:
if isinstance(self.data, np.ndarray):
if attribute in self.data.dtype.names:
self.data[attribute] = value
return
self.__dict__[attribute] = value
def __len__(self):
if len(self.columns) == 0:
return 0
else:
return len(self.data)
def reset(self):
'''
Empty the table
'''
self.keywords = odict()
self.comments = []
self.columns = odict()
self.data = None
self._primary_key = None
return
def _raise_vector_columns(self):
names = []
for name in self.names:
if self.data[name].ndim > 1:
names.append(name)
if names:
names = ", ".join(names)
raise VectorException(names)
return
def _setup_table(self, n_rows, dtype, units=None, descriptions=None, formats=None, nulls=None):
if self._masked:
self.data = ma.zeros(n_rows, dtype=dtype)
else:
self.data = np.zeros(n_rows, dtype=dtype)
for i in range(len(dtype)):
if units is None:
unit = None
else:
unit = units[i]
if descriptions is None:
description = None
else:
description = descriptions[i]
if formats is None or formats[i] in ['e', 'g', 'f']:
if dtype[i].subdtype:
format = default_format[dtype[i].subdtype[0].type]
else:
format = default_format[dtype[i].type]
else:
format = formats[i]
# Backward compatibility with tuple-style format
if type(format) in [tuple, list]:
format = "".join([str(x) for x in format])
if format == 's':
format = '%is' % dtype[i].itemsize
if nulls is None:
null = None
else:
null = nulls[i]
column = ColumnHeader(dtype[i], unit=unit, description=description, null=null, format=format)
self.columns[dtype.names[i]] = column
def add_empty_column(self, name, dtype, unit='', null='', \
description='', format=None, column_header=None, shape=None, before=None, after=None, \
position=None):
'''
Add an empty column to the table. This only works if there
are already existing columns in the table.
Required Arguments:
*name*: [ string ]
The name of the column to add
*dtype*: [ numpy type ]
Numpy type of the column. This is the equivalent to
the dtype= argument in numpy.array
Optional Keyword Arguments:
*unit*: [ string ]
The unit of the values in the column
*null*: [ same type as data ]
The values corresponding to 'null', if not NaN
*description*: [ string ]
A description of the content of the column
*format*: [ string ]
The format to use for ASCII printing
*column_header*: [ ColumnHeader ]
The metadata from an existing column to copy over. Metadata
includes the unit, null value, description, format, and dtype.
For example, to create a column 'b' with identical metadata to
column 'a' in table 't', use:
>>> t.add_column('b', column_header=t.columns[a])
*shape*: [ tuple ]
Tuple describing the shape of the empty column that is to be
added. If a one element tuple is specified, it is the number
of rows. If a two element tuple is specified, the first is the
number of rows, and the second is the width of the column.
*before*: [ string ]
Column before which the new column should be inserted
*after*: [ string ]
Column after which the new column should be inserted
*position*: [ integer ]
Position at which the new column should be inserted (0 = first
column)
'''
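# Example usage (a sketch; assumes t already contains at least one column, so
# the new column can take its length from the table):
#
#     >>> t.add_empty_column('flux_err', np.float32, unit='mJy')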
if shape:
data = np.zeros(shape, dtype=dtype)
elif self.__len__() > 0:
data = np.zeros(self.__len__(), dtype=dtype)
else:
raise Exception("Table is empty, you need to use the shape= argument to specify the dimensions of the first column")
self.add_column(name, data, unit=unit, null=null, \
description=description, format=format, column_header=column_header, before=before, \
after=after, position=position)
def add_column(self, name, data, unit='', null='', description='', \
format=None, dtype=None, column_header=None, before=None, after=None, position=None, mask=None, fill=None):
'''
Add a column to the table
Required Arguments:
*name*: [ string ]
The name of the column to add
*data*: [ numpy array ]
The column data
Optional Keyword Arguments:
*unit*: [ string ]
The unit of the values in the column
*null*: [ same type as data ]
The values corresponding to 'null', if not NaN
*description*: [ string ]
A description of the content of the column
*format*: [ string ]
The format to use for ASCII printing
*dtype*: [ numpy type ]
Numpy type to convert the data to. This is the equivalent to
the dtype= argument in numpy.array
*column_header*: [ ColumnHeader ]
The metadata from an existing column to copy over. Metadata
includes the unit, null value, description, format, and dtype.
For example, to create a column 'b' with identical metadata to
column 'a' in table 't', use:
>>> t.add_column('b', column_header=t.columns[a])
*before*: [ string ]
Column before which the new column should be inserted
*after*: [ string ]
Column after which the new column should be inserted
*position*: [ integer ]
Position at which the new column should be inserted (0 = first
column)
*mask*: [ numpy array ]
An array of booleans, with the same dimensions as the data,
indicating whether or not to mask values.
*fill*: [ value ]
If masked arrays are used, this value is used as the fill
value in the numpy masked array.
'''
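# Example usage (a sketch; the column name and values are illustrative):
#
#     >>> t.add_column('dec', np.array([-30.1, 12.4, 45.0]), unit='deg',
#     ...              description='Declination (J2000)')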
if column_header is not None:
if dtype is not None:
warnings.warn("dtype= argument overriden by column_header=")
dtype = column_header.dtype
if unit != '':
warnings.warn("unit= argument overriden by column_header=")
unit = column_header.unit
if null != '':
warnings.warn("null= argument overriden by column_header=")
null = column_header.null
if description != '':
warnings.warn("description= argument overriden by column_header=")
description = column_header.description
if format is not None:
warnings.warn("format= argument overriden by column_header=")
format = column_header.format
if self._masked:
if null:
warnings.warn("null= argument can only be used if Table does not use masked arrays (ignored)")
data = ma.array(data, dtype=dtype, mask=mask, fill_value=fill)
else:
if np.any(mask):
warnings.warn("mask= argument can only be used if Table uses masked arrays (ignored)")
data = np.array(data, dtype=dtype)
dtype = data.dtype
if dtype.type == np.object_:
if len(data) == 0:
longest = 0
else:
longest = len(max(data, key=len))
if self._masked:
data = ma.array(data, dtype='|%iS' % longest)
else:
data = np.array(data, dtype='|%iS' % longest)
dtype = data.dtype
if data.ndim > 1:
newdtype = (name, data.dtype, (data.shape[1],))
else:
newdtype = (name, data.dtype)
if before:
try:
position = list(self.names).index(before)
except:
raise Exception("Column %s does not exist" % before)
elif after:
try:
position = list(self.names).index(after) + 1
except:
raise Exception("Column %s does not exist" % before)
if len(self.columns) > 0:
self.data = append_field(self.data, data, dtype=newdtype, position=position, masked=self._masked)
else:
if self._masked:
self.data = ma.array(list(zip(data)), dtype=[newdtype], mask=list(zip(data.mask)), fill_value=data.fill_value)
else:
self.data = np.array(list(zip(data)), dtype=[newdtype])
if not format or format in ['e', 'g', 'f']:
format = default_format[dtype.type]
# Backward compatibility with tuple-style format
if type(format) in [tuple, list]:
format = "".join([str(x) for x in format])
if format == 's':
format = '%is' % data.itemsize
column = ColumnHeader(dtype, unit=unit, description=description, null=null, format=format)
if position is not None:
self.columns.insert(position, name, column)
else:
self.columns[name] = column
return
def remove_column(self, remove_name):
print("WARNING: remove_column is deprecated - use remove_columns instead")
self.remove_columns([remove_name])
return
def remove_columns(self, remove_names):
'''
Remove several columns from the table
Required Argument:
*remove_names*: [ list of strings ]
A list containing the names of the columns to remove
'''
if type(remove_names) == str:
remove_names = [remove_names]
for remove_name in remove_names:
self.columns.pop(remove_name)
self.data = drop_fields(self.data, remove_names, masked=self._masked)
# Remove primary key if needed
if self._primary_key in remove_names:
self._primary_key = None
return
def keep_columns(self, keep_names):
'''
Keep only specific columns in the table (remove the others)
Required Argument:
*keep_names*: [ list of strings ]
A list containing the names of the columns to keep.
All other columns will be removed.
'''
if type(keep_names) == str:
keep_names = [keep_names]
remove_names = list(set(self.names) - set(keep_names))
if len(remove_names) == len(self.names):
raise Exception("No columns to keep")
self.remove_columns(remove_names)
return
def rename_column(self, old_name, new_name):
'''
Rename a column in the table
Required Arguments:
*old_name*: [ string ]
The current name of the column.
*new_name*: [ string ]
The new name for the column
'''
if new_name in self.names:
raise Exception("Column " + new_name + " already exists")
if not old_name in self.names:
raise Exception("Column " + old_name + " not found")
# tuple.index was only introduced in Python 2.6, so need to use list()
pos = list(self.names).index(old_name)
self.data.dtype.names = self.names[:pos] + (new_name, ) + self.names[pos + 1:]
if self._masked:
self.data.mask.dtype.names = self.data.dtype.names[:]
self.columns.rename(old_name, new_name)
# Update primary key if needed
if self._primary_key == old_name:
self._primary_key = new_name
return
def describe(self):
'''
Prints a description of the table
'''
if self.data is None:
print("Table is empty")
return
if self.table_name:
print("Table : " + self.table_name)
else:
print("Table has no name")
# Find maximum column widths
len_name_max, len_unit_max, len_datatype_max, \
len_formats_max = 4, 4, 4, 6
for name in self.names:
len_name_max = max(len(name), len_name_max)
len_unit_max = max(len(str(self.columns[name].unit)), len_unit_max)
len_datatype_max = max(len(str(self.columns[name].dtype)), \
len_datatype_max)
len_formats_max = max(len(self.columns[name].format), len_formats_max)
# Print out table
format = "| %" + str(len_name_max) + \
"s | %" + str(len_unit_max) + \
"s | %" + str(len_datatype_max) + \
"s | %" + str(len_formats_max) + "s |"
len_tot = len_name_max + len_unit_max + len_datatype_max + \
len_formats_max + 13
print("-" * len_tot)
print(format % ("Name", "Unit", "Type", "Format"))
print("-" * len_tot)
for name in self.names:
print(format % (name, str(self.columns[name].unit), \
str(self.columns[name].dtype), self.columns[name].format))
print("-" * len_tot)
return
def sort(self, keys):
'''
Sort the table according to one or more keys. This operates
on the existing table (and does not return a new table).
Required arguments:
*keys*: [ string | list of strings ]
The key(s) to order by
'''
if not type(keys) == list:
keys = [keys]
self.data.sort(order=keys)
def row(self, row_number, python_types=False):
'''
Returns a single row
Required arguments:
*row_number*: [ integer ]
The row number (the first row is 0)
Optional Keyword Arguments:
*python_types*: [ True | False ]
Whether to return the row elements with python (True)
or numpy (False) types.
'''
if python_types:
return list(self.data[row_number].tolist())
else:
return self.data[row_number]
def rows(self, row_ids):
'''
Select specific rows from the table and return a new table instance
Required Argument:
*row_ids*: [ list | np.int array ]
A python list or numpy array specifying which rows to select,
and in what order.
Returns:
A new table instance, containing only the rows selected
'''
return self.where(np.array(row_ids, dtype=int))
def where(self, mask):
'''
Select matching rows from the table and return a new table instance
Required Argument:
*mask*: [ np.bool array ]
A boolean array with the same length as the table.
Returns:
A new table instance, containing only the rows selected
'''
new_table = self.__class__()
new_table.table_name = deepcopy(self.table_name)
new_table.columns = deepcopy(self.columns)
new_table.keywords = deepcopy(self.keywords)
new_table.comments = deepcopy(self.comments)
new_table.data = self.data[mask]
return new_table
def add_comment(self, comment):
'''
Add a comment to the table
Required Argument:
*comment*: [ string ]
The comment to add to the table
'''
self.comments.append(comment.strip())
return
def add_keyword(self, key, value):
'''
Add a keyword/value pair to the table
Required Arguments:
*key*: [ string ]
The name of the keyword
*value*: [ string | float | integer | bool ]
The value of the keyword
'''
if type(value) == str:
value = value.strip()
self.keywords[key.strip()] = value
return
def set_primary_key(self, key):
'''
Set the name of the table column that should be used as a unique
identifier for the record. This is the same as primary keys in SQL
databases. A primary column cannot contain NULLs and must contain only
unique quantities.
Required Arguments:
*key*: [ string ]
The column to use as a primary key
'''
if not key in self.names:
raise Exception("No such column: %s" % key)
else:
if self.columns[key].null != '':
if np.any(self.data[key] == self.columns[key].null):
raise Exception("Primary key column cannot contain null values")
elif len(np.unique(self.data[key])) != len(self.data[key]):
raise Exception("Primary key column cannot contain duplicate values")
else:
self._primary_key = key
return
class TableSet(object):
def fits_read(self, *args, **kwargs):
warnings.warn("WARNING: fits_read is deprecated; use read instead")
kwargs['type'] = 'fits'
self.read(*args, **kwargs)
def vo_read(self, *args, **kwargs):
warnings.warn("WARNING: vo_read is deprecated; use read instead")
kwargs['type'] = 'vo'
self.read(*args, **kwargs)
def sql_read(self, *args, **kwargs):
warnings.warn("WARNING: sql_read is deprecated; use read instead")
kwargs['type'] = 'sql'
self.read(*args, **kwargs)
def ipac_read(self, *args, **kwargs):
warnings.warn("WARNING: ipac_read is deprecated; use read instead")
kwargs['type'] = 'ipac'
self.read(*args, **kwargs)
def fits_write(self, *args, **kwargs):
warnings.warn("WARNING: fits_write is deprecated; use write instead")
kwargs['type'] = 'fits'
self.write(*args, **kwargs)
def vo_write(self, *args, **kwargs):
warnings.warn("WARNING: vo_write is deprecated; use write instead")
kwargs['type'] = 'vo'
self.write(*args, **kwargs)
def sql_write(self, *args, **kwargs):
warnings.warn("WARNING: sql_write is deprecated; use write instead")
kwargs['type'] = 'sql'
self.write(*args, **kwargs)
def ipac_write(self, *args, **kwargs):
warnings.warn("WARNING: ipac_write is deprecated; use write instead")
kwargs['type'] = 'ipac'
self.write(*args, **kwargs)
def reset(self):
'''
Empty the table set
'''
self.tables = odict()
self.keywords = {}
self.comments = []
return
def __init__(self, *args, **kwargs):
'''
Create a table set instance
Optional Arguments:
If no arguments are given, an empty table set will be created.
If one of the arguments is a list or a Table instance, then only
this argument will be used.
If one or more arguments are present, they are passed to the read
method
Optional Keyword Arguments (independent of table type):
*masked*: [ True | False ]
Whether to use masked arrays. WARNING: this feature is
experimental and will only work correctly with the svn version
of numpy post-revision 8025. Note that this overrides the
default set by atpy.set_masked_default.
'''
self.reset()
if len(args) == 1:
arg = args[0]
if type(arg) == list:
for table in arg:
self.append(table)
return
elif isinstance(arg, TableSet):
for table in arg.tables:
self.append(arg.tables[table])
return
# Pass arguments to read
if len(args) + len(kwargs) > 0:
self.read(*args, **kwargs)
return
def read(self, *args, **kwargs):
'''
Read in a table set from a file/database.
Optional Keyword Arguments (independent of table type):
*verbose*: [ True | False ]
Whether to print out warnings when reading (default is True)
*type*: [ string ]
The read method attempts to automatically guess the
file/database format based on the arguments supplied. The type
can be overridden by setting this argument.
'''
if 'verbose' in kwargs:
verbose = kwargs['verbose']
else:
verbose = True
if 'type' in kwargs:
table_type = kwargs.pop('type').lower()
elif type(args[0]) == str:
table_type = registry._determine_type(args[0], verbose)
else:
raise Exception('Could not determine table type')
original_filters = warnings.filters[:]
if verbose:
warnings.simplefilter("always")
else:
warnings.simplefilter("ignore")
try:
if table_type in registry._set_readers:
registry._set_readers[table_type](self, *args, **kwargs)
else:
raise Exception("Unknown table type: " + table_type)
finally:
warnings.filters = original_filters
return
def write(self, *args, **kwargs):
'''
Write out a table set to a file/database.
Optional Keyword Arguments (independent of table type):
*verbose*: [ True | False ]
Whether to print out warnings when writing (default is True)
*type*: [ string ]
The write method attempts to automatically guess the
file/database format based on the arguments supplied. The type
can be overridden by setting this argument.
'''
if 'verbose' in kwargs:
verbose = kwargs.pop('verbose')
else:
verbose = True
if 'type' in kwargs:
table_type = kwargs.pop('type').lower()
elif type(args[0]) == str:
table_type = registry._determine_type(args[0], verbose)
else:
raise Exception('Could not determine table type')
original_filters = warnings.filters[:]
if verbose:
warnings.simplefilter("always")
else:
warnings.simplefilter("ignore")
try:
if table_type in registry._set_writers:
registry._set_writers[table_type](self, *args, **kwargs)
else:
raise Exception("Unknown table type: " + table_type)
finally:
warnings.filters = original_filters
return
def __getitem__(self, item):
return self.tables[item]
def __getattr__(self, attribute):
for table in self.tables:
if attribute == self.tables[table].table_name:
return self.tables[table]
raise AttributeError(attribute)
def append(self, table):
'''
Append a table to the table set
Required Arguments:
*table*: [ a table instance ]
This can be a table of any type, which will be converted
to a table of the same type as the parent set (e.g. adding
a single VOTable to a FITSTableSet will convert the VOTable
to a FITSTable inside the set)
'''
table_key = table.table_name
if table_key in self.tables:
for i in range(1, 10001):
if not "%s.%05i" % (table_key, i) in self.tables:
table_key = "%s.%05i" % (table_key, i)
warnings.warn("There is already a table named %s in the TableSet. Renaming to %s" % (table.table_name, table_key))
break
elif table_key is None:
for i in range(1, 10001):
if not "Untitled.%05i" % i in self.tables:
table_key = "Untitled.%05i" % i
warnings.warn("Table has no name. Setting to %s" % table_key)
break
self.tables[table_key] = table
return
def describe(self):
'''
Describe all the tables in the set
'''
for table in self.tables:
self.tables[table].describe()
return
def add_comment(self, comment):
'''
Add a comment to the table set
Required Argument:
*comment*: [ string ]
The comment to add to the table
'''
self.comments.append(comment.strip())
return
def add_keyword(self, key, value):
'''
Add a keyword/value pair to the table set
Required Arguments:
*key*: [ string ]
The name of the keyword
*value*: [ string | float | integer | bool ]
The value of the keyword
'''
if type(value) == str:
value = value.strip()
self.keywords[key.strip()] = value
return | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/basetable.py | basetable.py |
.. _tablesets:
===========
Table Sets
===========
A ``TableSet`` instance contains a Python list of individual instances of the ``Table`` class. The advantage of using a ``TableSet`` instead of building a Python list of ``Table`` instances manually is that ATpy allows reading and writing of groups of tables to file formats that support it (e.g. FITS and VO table files or SQL databases).
Initialization
==============
The easiest way to create a table set object is to call the ``TableSet`` class with no arguments::
tset = TableSet()
Manually adding a table to a set
================================
An instance of the ``Table`` class can be added to a set by using the ``append()`` method::
tset.append(t)
where ``t`` is an instance of the ``Table()`` class.
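For example, a minimal sketch (the file names are hypothetical) that gathers two existing tables into a set and writes them out to a single multi-table file::
    t1 = Table('catalog_a.fits')
    t2 = Table('catalog_b.fits')
    tset = TableSet()
    tset.append(t1)
    tset.append(t2)
    tset.write('combined.fits')  # any format that supports multiple tables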
Reading in tables from a file or database
=========================================
The ``read()`` method can be used to read in multiple tables from a file or database. This method automatically determines the file or database type and reads in the tables. For example, all the tables in a VO table can be read in using::
tset.read('somedata.xml')
while all the tables in a FITS file can be read in using::
tset.read('somedata.fits')
As for the ``Table()`` class, in some cases, ``read()`` will fail to determine the input type. In this case, or to override the automatically selected type, the input type can be specified using the type argument::
tset.read('somedata.fits.gz', type='fits')
Any arguments passed to ``TableSet()`` when creating a table instance are passed to the ``read()`` method. This can be used to create a ``TableSet()`` instance and fill it with data in a single line. For example, the following::
tset = TableSet('somedata.xml')
is equivalent to::
tset = TableSet()
tset.read('somedata.xml')
Accessing a single table
========================
Single tables can be accessed through the ``TableSet.tables`` python list. For example, the first table in a set can be accessed with::
tset.tables[0]
And all methods associated with single tables are then available. For example, the following shows how to run the ``describe`` method of the first table in a set::
tset.tables[0].describe()
Adding meta-data
================
As well as having keywords and comments associated with each ``Table``, it is possible to have overall keywords and comments associated with a ``TableSet``.
Comments and keywords can be added to a table using the ``add_comment()`` and ``add_keyword()`` methods::
>>> tset.add_comment("This is a great table set")
>>> tset.add_keyword("version", 314)
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/table_sets.rst | table_sets.rst |
.. _data:
====================
Accessing Table Data
====================
Accessing the data
==================
The table data is stored in a NumPy structured array, which can be accessed by passing the column name as a key. This returns the column in question as a NumPy array::
t['column_name']
For convenience, columns with names that satisfy the python variable name requirements (essentially starting with a letter and containing no symbols apart from underscores) can be accessed directly as attributes of the table::
t.column_name
Since the returned data is a NumPy array, individual elements can be accessed using::
t['column_name'][row_number]
or::
t.column_name[row_number]
Both notations can be used to set data in the table, for example::
t.column_name[row_number] = 1
and::
t['column_name'][row_number] = 1
are equivalent, and will set the element at ``row_number`` to 1
Accessing the metadata
======================
The column metadata is stored in the ``columns`` attribute. To see an overview of the metadata, simply use::
>>> t.columns
The metadata for a specific column can then be accessed by specifying the column name as a key::
>>> t.columns['some_column']
or using the column number::
>>> t.columns[column_number]
The attributes of a column object are ``dtype``, ``unit``, ``description``, ``null``, and ``format``.
.. note::
While the unit, description and format for a column can be modified using
the columns attribute, the dtype and null values should not be modified in
this way as the changes will not propagate to the data array.
It is also possible to view a description of the table by using the ``describe`` method of the ``Table`` instance::
>>> t.describe()
In addition to the column metadata, the comments and keywords are available via the ``keywords`` and ``comments`` attributes of the ``Table`` instance, for example::
>>> instrument = t.keywords['instrument']
The ``keywords`` attribute is a dictionary, and the ``comments`` attribute is a list.
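For example, assuming the table contains a column named ``flux``, the associated metadata can be inspected with::
    >>> print(t.columns['flux'].unit)         # unit of the 'flux' column
    >>> print(t.columns['flux'].description)  # free-form column description
    >>> print(t.keywords)                     # dictionary of keyword/value pairs
    >>> print(t.comments)                     # list of comment strings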
Accessing table rows
====================
The ``row(...)`` method can be used to access a specific row in a table::
>>> row = t.row(row_number)
This returns the row as a NumPy record. The row can instead be returned as a tuple of elements with Python types, by using the ``python_types`` argument:
>>> row = t.row(row_number, python_types=True)
Two more powerful methods are available: ``rows`` and ``where``. The ``rows`` method can be used to retrieve specific rows from a table as a new ``Table`` instance::
>>> t_new = t.rows([1,3,5,2,7,8])
Alternatively, the ``where`` method can be given a boolean array to determine which rows should be selected. This is in fact very powerful as the boolean array can actually be written as selection conditions::
>>> t_new = t.where((t.id > 10) & (t.ra < 45.4) & (t.flag == 'ok'))
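Since ``where`` and ``rows`` both return new ``Table`` instances, selections can be chained. For example (assuming the table has ``id`` and ``flux`` columns)::
    >>> bright = t.where(t.flux > 10.)
    >>> print(len(bright))
    >>> subset = bright.rows([0, 1, 2])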
Global Table properties
=======================
One can access the number of rows in a table by using the python ``len`` function::
>>> len(t)
In addition, the number of rows and columns can also be accessed with the ``shape`` attribute:
>>> t.shape
where the first number is the number of rows, and the second is the number of columns (note that a vector column counts as a single column).
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/data.rst | data.rst |
.. _tables:
====================
Constructing a table
====================
The ``Table`` class is the basic entity in ATpy. It consists of table data
and metadata. The data is stored using a `NumPy <http://numpy.scipy.org/>`_
structured array. The metadata includes units, null values, and column
descriptions, as well as comments and keywords.
Data can be stored in the table using many of the `NumPy types
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html#built-in-scalar-types>`_,
including booleans, 8, 16, 32, and 64-bit signed and unsigned integers, 32
and 64-bit floats, and strings. Not all file formats and databases support
reading and writing all of these types -- for more information, see
:ref:`formats`.
Creating a table
================
The simplest way to create an instance of the ``Table`` is to call the
class with no arguments::
>>> t = atpy.Table()
Populating the table
====================
A table can be populated either manually or by reading data from a file or database. Reading data into a table erases previous content. Data can be manually added once a table has been read in from a file.
Reading data from a file
------------------------
The ``read(...)`` method can be used to read in a table from a file. To date, ATpy supports the following file formats:
* `FITS <http://archive.stsci.edu/fits/fits_standard/>`_ tables (``type=fits``)
* `VO <http://www.ivoa.net/Documents/VOTable/>`_ tables (``type=vo``)
* `IPAC <http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html>`_ tables (``type=ipac``)
* `HDF5 <http://www.hdfgroup.org/HDF5/>`_ (``type=hdf5``)
Now that ATpy integrates with `asciitable <http://cxc.harvard.edu/contrib/asciitable/>`_, the following formats are also supported:
* `CDS <http://vizier.u-strasbg.fr/doc/catstd.htx>`_ (``type=cds`` or ``type=mrt``)
* DAOPhot (``type=daophot``)
* RDB (``type=rdb``)
* Arbitrary ASCII tables (``type=ascii``)
When reading a table from a file, the only required argument is the filename. For example, to read a VO table called ``example.xml``, the following should be used::
>>> t.read('example.xml')
Auto-detected input type: VO table
The ``read()`` method will in most cases correctly identify the format of the file from the extension. As seen above, the default behavior is to specifically tell the user what format is being assumed, but this can be controlled via the ``verbose`` argument.
In some cases, ``read()`` will fail to determine the input type. In this case, or to override the automatically selected type, the input type can be specified using the ``type`` argument::
>>> t.read('example.xml', type='vo')
The ``read`` method supports additional file-format-dependent options. These are described in more detail in :ref:`formats`.
In cases where multiple tables are available in a table file, ATpy will display a message to the screen with instructions on how to specify which table to read in. Alternatively, see :ref:`tablesets` for information on how to read all tables into a single ``TableSet`` instance.
As a convenience, it is possible to create a ``Table`` instance and read in data in a single command::
>>> t = Table('example.xml')
Any arguments given to ``Table`` are passed on to the ``read`` method, so the above is equivalent to::
>>> t = Table()
>>> t.read('example.xml')
As of 0.9.6, it is now possible to specify URLs starting with ``http://``
or ``ftp://`` and the file will automatically be downloaded. Furthermore,
it is possible to specify files compressed in gzip or bzip format for all
I/O formats.
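For example (the URL below is purely illustrative)::
    >>> t = atpy.Table('http://example.com/catalogs/sources.fits.gz', type='fits')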
Reading data from a database
----------------------------
Reading a table from a database is very similar to reading a table from a file. The main difference is that for databases, the first argument should be the database type. To date, ATpy supports the following database types:
* SQLite (``sqlite``)
* MySQL (``mysql``)
* PostGreSQL (``postgres``)
The remaining arguments depend on the database type. For example, an SQLite database can be read by specifying the database filename::
>>> t.read('sqlite','example.db')
For MySQL and PostGreSQL databases, it is possible to specify the database, table, authentication, and host parameters. The various options are described in more detail in :ref:`formats`. As for files, the ``verbose`` and ``type`` arguments can be used.
As for reading in from files, one can read in data from a database while initializing the ``Table`` object::
>>> t = Table('sqlite','example.db')
.. note::
It is possible to specify a full SQL query using the ``query`` argument.
Any valid SQL is allowed. If this is used, the table name should
nevertheless be specified using the ``table`` argument.
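For example, the following sketch (the database, table, and column names are hypothetical) reads only part of an SQLite table::
    >>> t = atpy.Table('sqlite', 'example.db', table='sources', query='SELECT * FROM sources WHERE flux > 10')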
Adding columns to a table
-------------------------
It is possible to add columns to an empty or an existing table. Two methods exist for this. The first, ``add_column``, allows users to add an existing array to a column. For example, the following can be used to add a column named ``time`` where the variable ``time_array`` is a NumPy array::
>>> t.add_column('time', time_array)
The ``add_column`` method also optionally takes metadata about the column, such as units, or a description. For example::
>>> t.add_column('time', time_array, unit='seconds')
indicates that the units of the column are seconds. It is also possible to convert the datatype of an array while adding it to a table by using the ``dtype`` argument. For example, the following stores the column from the above examples as 32-bit floating point values::
>>> t.add_column('time', time_array, unit='seconds', dtype=np.float32)
In some cases, it is desirable to add an empty column to a table, and populate it element by element. This can be done using the ``add_empty_column`` method. The only required arguments for this method are the name and the data type of the column::
>>> t.add_empty_column('id', np.int16)
If the column is the first one being added to an empty table, the ``shape`` argument should be used to specify the number of rows. This should either be an integer giving the number of rows, or a tuple in the case of vector columns (see :ref:`vectorcolumns` for more details)
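For example, a short sketch that builds a small table from scratch (the column names are arbitrary)::
    >>> t = atpy.Table()
    >>> t.add_empty_column('id', np.int16, shape=100)
    >>> t.add_empty_column('flux', np.float32)
    >>> t.id[0] = 1
    >>> t.flux[0] = 3.5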
.. _vectorcolumns:
Vector Columns
--------------
As well as using one-dimensional columns, it is also possible to specify so-called vector columns, which are essentially two-dimensional arrays. Only FITS and VO tables support reading and writing these. The ``add_column`` method accepts two-dimensional arrays as input, and uses these to define vector columns. Empty vector columns can be created by using the ``add_empty_column`` method along with the ``shape`` argument to specify the full shape of the column. This should be a tuple of the form ``(n_rows, n_elements)``.
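For example, assuming ``spectra`` is a two-dimensional NumPy array with one row per table row::
    >>> t.add_column('spectrum', spectra)
    >>> t.add_empty_column('flags', np.int16, shape=(len(t), 8))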
Writing the data to a file
--------------------------
Writing data to files or databases is done through the ``write`` method. The arguments to this method are very similar to that of the ``read`` data. The only main difference is that the ``write`` method can take an ``overwrite`` argument that specifies whether or not to overwrite existing files.
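For example, to write a table to a FITS file, replacing any existing file of the same name::
    >>> t.write('output.fits', overwrite=True)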
Adding meta-data
================
Comments and keywords can be added to a table using the ``add_comment()`` and ``add_keyword()`` methods::
>>> t.add_comment("This is a great table")
>>> t.add_keyword("meaning", 42)
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/tables.rst | tables.rst |
======================
Custom reading/writing
======================
One of the new features introduced in ATpy 0.9.2 is the ability for users to write their own read/write functions and *register* them with ATpy. A read or write function needs to satisfy the following requirements:
* The first argument should be a ``Table`` instance (in the case of a single
table reader/writer) or a ``TableSet`` instance (in the case of a table set
reader/writer)
* The function can take any other arguments, with the exception of the keyword
arguments ``verbose`` and ``type``.
* The function should not return anything, but rather should operate directly
on the table or table set instance passed as the first argument
* If the file format supports masking/null values, the function should take
into account that there are two ways to mask values (see
:ref:`maskingandnull`). The ``Table`` instance has a ``_masked`` attribute
that specifies whether the user wants a Table with masked arrays, or with a
null value. The function should take this into account. For example, in the
built-in FITS reader, the table is populated with ``add_column`` in the
following way::
if self._masked:
self.add_column(name, data, unit=columns.units[i], \
mask=data==columns.nulls[i])
else:
self.add_column(name, data, unit=columns.units[i], \
null=columns.nulls[i])
The reader/writer function can then fill the table by using the ``Table`` methods described in :ref:`api` (for a single table reader/writer) or :ref:`apiset` (for a table set reader/writer). In particular, a single table reader will likely contain calls to ``add_column``, while a single table writer will likely contain references to the ``data`` attribute of ``Table``.
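For example, a minimal sketch of a custom reader (purely illustrative, not part of ATpy) for a two-column whitespace-separated ASCII format could look like this::
    import numpy as np

    def simple_read(self, filename):
        # Read the file into a 2-D array and fill the Table instance in place
        data = np.loadtxt(filename)
        self.add_column('x', data[:, 0])
        self.add_column('y', data[:, 1])
        self.add_keyword('source_file', filename)
A real reader for a format that supports null values should also check ``self._masked``, as shown above.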
Once a custom function is available, the user can register it using one of the four ATpy functions:
* ``atpy.register_reader``: Register a reader function for single tables
* ``atpy.register_set_reader``: Register a reader function for table sets
* ``atpy.register_writer``: Register a writer function for single tables
* ``atpy.register_set_writer``: Register a writer function for tables sets
The API for these functions is of the form ``(ttype, function, override=True/False)``, where ``ttype`` is the code name for the format (like the built-in ``fits``, ``vo``, ``ipac``, or ``sql`` types), ``function`` is the actual function to use, and ``override`` allows the user to override existing definitions (for example to provide an improved ``ipac`` reader).
For example, if a function is defined for reading HDF5 tables, which we can call hdf5.read, then one would first need to register this function after importing atpy::
>>> import atpy
>>> atpy.register_reader('hdf5', hdf5.read)
This type can then be used when reading in a table::
>>> t = atpy.Table('mytable.hdf5', type='hdf5')
It is also possible to register extensions for a specific type using ``atpy.register_extensions``. This function expects a table type and a list of file extensions to associate with it. For example, by setting::
>>> atpy.register_extensions('hdf5', ['hdf5', 'hdf'])
One can then read in an HDF5 table without specifying the type::
>>> t = atpy.Table('mytable.hdf5')
We encourage users to send us examples of reader/writer functions for various formats, and would be happy in future to include readers and writers for commonly used formats in ATpy.
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/developers.rst | developers.rst |
.. _format_ascii:
============
ASCII tables
============
.. note::
There are probably as many ASCII table formats as astronomers (if not
more). These generally store a single table, and can sometimes include
meta-data.
Overview
--------
Reading ASCII tables is supported thanks to the `asciitable <http://cxc.harvard.edu/contrib/asciitable/>`_ module, which makes it easy to read in arbitrary ASCII files.
By default, several pre-defined formats are available. These include `CDS <http://vizier.u-strasbg.fr/doc/catstd.htx>`_ tables (also called Machine-Readable tables), DAOPhot tables, and RDB tables. To read these formats, simply use::
>>> t = atpy.Table('table.mrt', type='mrt')
>>> t = atpy.Table('table.cds', type='cds')
>>> t = atpy.Table('table.phot', type='daophot')
>>> t = atpy.Table('table.rdb', type='rdb')
The ``type=`` argument is optional for these formats if the files have appropriate extensions, but given the large number of ASCII file formats, it is safer to include it.
ATpy also allows full access to asciitable. If the ``type='ascii'`` argument is specified in ``Table()``, all arguments are passed to ``asciitable.read``, and the result is automatically stored in the ATpy ``Table`` instance. For more information on the arguments available in ``asciitable.read``, see `here <http://cxc.harvard.edu/contrib/asciitable/#basic-usage-with-read>`_.
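For example, the following sketch reads a whitespace-separated file and overrides the column names (the ``names`` keyword is simply passed through to ``asciitable.read``)::
    >>> t = atpy.Table('catalog.txt', type='ascii', names=['id', 'ra', 'dec'])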
.. note::
As for all file formats, the ``verbose`` argument can be specified to
control whether warning messages are shown when reading (the default is
``verbose=True``), and the ``overwrite`` argument can be used when
writing to overwrite a file (the default is ``overwrite=False``).
Full API for advanced users
---------------------------
.. note ::
The following functions should not be called directly - the arguments should be passed to ``Table()/Table.read()``.
.. autofunction:: atpy.asciitables.read_cds
.. autofunction:: atpy.asciitables.read_daophot
.. autofunction:: atpy.asciitables.read_rdb
.. autofunction:: atpy.asciitables.read_ascii
.. autofunction:: atpy.asciitables.write_ascii
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/format_ascii.rst | format_ascii.rst |
.. _format_ipac:
===========
IPAC tables
===========
.. note::
IPAC tables are ASCII files, each containing a single table. The
format can contain meta-data consisting of keyword values and
comments (analogous to FITS files), and the column headers are
separated by pipe (``|``) symbols that indicate the position of the
columns.
IPAC tables are natively supported in ATpy (no additional module is required). Reading IPAC tables is straightforward::
>>> t = atpy.Table('table.tbl')
and writing a table out in IPAC format is equally easy::
>>> t.write('table.tbl')
IPAC tables can have three different definitions with regard to the alignment of the columns with the pipe symbols in the header. The definition to use is controlled by the ``definition`` argument. The definitions are:
1. Any character below a pipe symbol belongs to the column on the left, and any characters below the first pipe symbol belong to the first column.
2. Any character below a pipe symbol belongs to the column on the right.
3. No characters should be present below the pipe symbols.
The default is ``definition=3``.
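For example, to read a table that follows the second convention::
    >>> t = atpy.Table('table.tbl', definition=2)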
.. note::
As for all file formats, the ``verbose`` argument can be specified to
control whether warning messages are shown when reading (the default is
``verbose=True``), and the ``overwrite`` argument can be used when
writing to overwrite a file (the default is ``overwrite=False``).
Full API for advanced users
---------------------------
.. note ::
The following functions should not be called directly - the arguments should be passed to ``Table()/Table.read()`` and
``Table.write()`` respectively.
.. autofunction:: atpy.ipactable.read
.. autofunction:: atpy.ipactable.write | ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/format_ipac.rst | format_ipac.rst |
.. _apiset:
============================
Full API for TableSet class
============================
.. automodule:: atpy
TableSet initialization and I/O
===============================
.. automethod:: TableSet.reset
.. automethod:: TableSet.read
.. automethod:: TableSet.write
Meta-data
=========
.. automethod:: TableSet.add_comment
.. automethod:: TableSet.add_keyword
.. automethod:: TableSet.describe
TableSet manipulation and I/O
===============================
.. automethod:: TableSet.append
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/api_tableset.rst | api_tableset.rst |
.. _format_vo:
=========
VO tables
=========
.. note::
Virtual Observatory (VO) tables are a new format developed by the
International Virtual Observatory Alliance to store one or more tables.
It is a format based on the Extensible Markup Language (XML).
VO tables are supported thanks to the `vo <https://trac6.assembla.com/astrolib>`_ module. Reading VO tables is straightforward::
>>> t = atpy.Table('table.vot')
If more than one table is present in the file, ATpy will give a list of available tables, identified by an ID (``tid``). The specific table to read can then be specified with the ``tid=`` argument::
>>> t = atpy.Table('table.vot', tid=2)
To read in all tables in a file, use the ``TableSet`` class::
>>> t = atpy.TableSet('table.vot')
In some cases, the VO table file may not be strictly standard compliant. When reading in a VO table, it is possible to specify an argument which controls whether to adhere strictly to standards and throw an exception if any errors are found (``pedantic=True``), or whether to relax the requirements and accept non-standard features (``pedantic=False``). The latter is the default.
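For example, to enforce strict standard compliance when reading::
    >>> t = atpy.Table('table.vot', pedantic=True)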
Finally, when writing out a VO table, the default is to use ASCII VO tables (analogous to ASCII FITS tables). It is also possible to write tables out in binary VO format. To do this, use the ``votype`` argument:
>>> t.write('table.vot', votype='binary')
The default is ``votype='ascii'``.
In the event that ATpy does not recognize a VO table (for example if the file extension is obscure), the type can be explicitly given::
>>> t = atpy.Table('table', type='vo')
.. note::
As for all file formats, the ``verbose`` argument can be specified to
control whether warning messages are shown when reading (the default is
``verbose=True``), and the ``overwrite`` argument can be used when
writing to overwrite a file (the default is ``overwrite=False``).
Full API for advanced users
---------------------------
.. note ::
The following functions should not be called directly - the arguments should be passed to ``Table()/Table.read()``,
``Table.write()``, ``TableSet()/TableSet.read()``, and
``TableSet.write()`` respectively.
.. autofunction:: atpy.votable.read
.. autofunction:: atpy.votable.write
.. autofunction:: atpy.votable.read_set
.. autofunction:: atpy.votable.write_set
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/format_vo.rst | format_vo.rst |
====================================
ATpy - Astronomical Tables in Python
====================================
.. admonition:: Please note!
Much of ATpy's functionality has now been incorporated into
`Astropy <http://www.astropy.org>`_, and while we will continue
to fix bugs, we are no longer actively developing new features in
ATpy, instead focusing our efforts on Astropy. If you are a new
user, and do not need the SQL-querying functionality, we
recommend using `Astropy Tables
<http://docs.astropy.org/en/stable/table>`_ directly. If you are
already using ATpy and are interested in migrating to Astropy,
please read our :doc:`migration`.
`GitHub <https://github.com/atpy/atpy>`_ - `Download latest stable version <https://pypi.python.org/pypi/ATpy/>`_ - `Report Bugs <https://github.com/atpy/atpy/issues>`_
ATpy is a high-level Python package providing a way to manipulate tables of
astronomical data in a uniform way. The two main features of ATpy are:
* It provides a Table class that contains data stored in a NumPy structured
array, along with meta-data to describe the columns, and methods to
manipulate the table (e.g. adding/removing/renaming columns, selecting rows,
changing values, sorting, ...).
* It provides built-in support for reading and writing to several common
file/database formats, including FITS, VO, HDF5, and ASCII tables, as
well as SQLite, MySQL and PostgreSQL databases, with a very simple API.
In addition, ATpy provides a TableSet class that can be used to contain
multiple tables, and supports reading and writing to file/database formats
that support this (FITS, VO, and SQL databases).
Finally, ATpy provides support for user-written read/write functions for
file/database formats not supported by default. We encourage users to send
us custom read/write functions to read commonly used formats, and would be
happy to integrate them into the main distribution.
The following example shows how ATpy can be used to read, convert, and
write a data file from FITS format to VO, HDF5, IPAC, and SQLite formats::
import atpy
tbl = atpy.Table('some_fits_table_file.fits')
# ATpy will automatically try to detect which type of file you're writing.
tbl.write('new_votable.xml') # VO Table
tbl.write('new_ipactable.tbl') # IPAC table
tbl.write('new_ipactable.hdf5') # HDF5 table
tbl.write('sqlite','new_sqlitetable.db') # SQLite database
# You can easily access and modify data in the table:
tbl.some_column[3] = 4.5
tbl.remove_column('some_other_column')
This is only a small fraction of ATpy's functionality. We strongly
recommend that users read through the documentation, which is available
below. For a quick introduction, we recommend the :ref:`tables` and
:ref:`data` sections. For information about format-specific features, see
:ref:`formats`.
Documentation
-------------
.. toctree::
:maxdepth: 1
installation.rst
tables.rst
data.rst
manipulating.rst
table_sets.rst
masking.rst
developers.rst
formats.rst
api_table.rst
api_tableset.rst
Developers
----------
ATpy is developed by `Thomas Robitaille <http://www.mpia-hd.mpg.de/~robitaille/>`_ and `Eli Bressert <http://astrobiased.com>`_.
| ATpy | /ATpy-0.9.7.tar.gz/ATpy-0.9.7/docs/index.rst | index.rst |