Dataset schema (one row per extracted Python function):

| body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (string, always "python") | body_without_docstring (string, 20-98.2k chars) |
---|---|---|---|---|---|---|---|
def download_file(url, filename, sourceiss3bucket=None):
'\nDownload the file from `url` and save it locally under `filename`.\n :rtype : bool\n :param url:\n :param filename:\n :param sourceiss3bucket:\n '
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\nurl = {0}\nbucket = {1}\nkey = {2}\nfile = {3}\nException: {4}'.format(url, bucket_name, key_name, filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\nurl = {0}\nbucket = {1}\nkey = {2}\nfile = {3}\nException: {4}'.format(url, bucket_name, key_name, filename, exc))
print('Downloaded file from S3 bucket -- \n url = {0}\n filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
raise SystemError('Unable to download file from web server.\nurl = {0}\nfilename = {1}\nException: {2}'.format(url, filename, exc))
print('Downloaded file from web server -- \n url = {0}\n filename = {1}'.format(url, filename))
return True | -3,808,279,429,678,038,000 | Download the file from `url` and save it locally under `filename`.
:rtype : bool
:param url:
:param filename:
:param sourceiss3bucket: | MasterScripts/systemprep-linuxmaster.py | download_file | plus3it/SystemPrep | python | def download_file(url, filename, sourceiss3bucket=None):
'\nDownload the file from `url` and save it locally under `filename`.\n :rtype : bool\n :param url:\n :param filename:\n :param sourceiss3bucket:\n '
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\nurl = {0}\nbucket = {1}\nkey = {2}\nfile = {3}\nException: {4}'.format(url, bucket_name, key_name, filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\nurl = {0}\nbucket = {1}\nkey = {2}\nfile = {3}\nException: {4}'.format(url, bucket_name, key_name, filename, exc))
print('Downloaded file from S3 bucket -- \n url = {0}\n filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
raise SystemError('Unable to download file from web server.\nurl = {0}\nfilename = {1}\nException: {2}'.format(url, filename, exc))
print('Downloaded file from web server -- \n url = {0}\n filename = {1}'.format(url, filename))
return True |
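A brief usage sketch for the `download_file` helper above. The URLs and local paths are made up; they only illustrate the two S3 URL layouts the bucket/key parsing expects (path-style first, virtual-hosted-style in the fallback branch), plus the plain-HTTP path.

```python
# Hypothetical URLs -- shown only to illustrate the string slicing above.
path_style = 'https://s3.amazonaws.com/my-bucket/scripts/setup.sh'
print(path_style.split('/')[3])             # my-bucket
print('/'.join(path_style.split('/')[4:]))  # scripts/setup.sh

hosted_style = 'https://my-bucket.s3.amazonaws.com/scripts/setup.sh'
print(hosted_style.split('/')[2].split('.')[0])  # my-bucket (fallback branch)
print('/'.join(hosted_style.split('/')[3:]))     # scripts/setup.sh

# Typical calls (boto credentials are required for the S3 branch):
# download_file('https://example.com/setup.sh', '/tmp/setup.sh')
# download_file(path_style, '/tmp/setup.sh', sourceiss3bucket=True)
```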
def cleanup(workingdir):
'\n Removes temporary files loaded to the system.\n :param workingdir: str, Path to the working directory\n :return: bool\n '
print(('+-' * 40))
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
raise SystemError('Cleanup Failed!\nException: {0}'.format(exc))
print(('Removed temporary data in working directory -- ' + workingdir))
print('Exiting cleanup routine...')
print(('-+' * 40))
return True | 3,193,561,795,270,635,500 | Removes temporary files loaded to the system.
:param workingdir: str, Path to the working directory
:return: bool | MasterScripts/systemprep-linuxmaster.py | cleanup | plus3it/SystemPrep | python | def cleanup(workingdir):
'\n Removes temporary files loaded to the system.\n :param workingdir: str, Path to the working directory\n :return: bool\n '
print(('+-' * 40))
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
raise SystemError('Cleanup Failed!\nException: {0}'.format(exc))
print(('Removed temporary data in working directory -- ' + workingdir))
print('Exiting cleanup routine...')
print(('-+' * 40))
return True |
def main(noreboot='false', **kwargs):
'\n Master script that calls content scripts to be deployed when provisioning systems\n '
scriptname = ''
if ('__file__' in dir()):
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
noreboot = ('true' == noreboot.lower())
sourceiss3bucket = ('true' == kwargs.get('sourceiss3bucket', 'false').lower())
print(('+' * 80))
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for (key, value) in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[(- 1)]
fullfilepath = ((systemparams['workingdir'] + systemparams['pathseparator']) + filename)
download_file(url, fullfilepath, sourceiss3bucket)
print(('Running script -- ' + script['ScriptSource']))
print('Sending parameters --')
for (key, value) in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join((("%s='%s'" % (key, val)) for (key, val) in script['Parameters'].iteritems()))
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
if (result != 0):
message = 'Encountered an unrecoverable error executing a content script. Exiting with failure.\nCommand executed: {0}'.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print(('-' * 80)) | 4,525,279,070,260,258,300 | Master script that calls content scripts to be deployed when provisioning systems | MasterScripts/systemprep-linuxmaster.py | main | plus3it/SystemPrep | python | def main(noreboot='false', **kwargs):
'\n \n '
scriptname = ''
if ('__file__' in dir()):
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
noreboot = ('true' == noreboot.lower())
sourceiss3bucket = ('true' == kwargs.get('sourceiss3bucket', 'false').lower())
print(('+' * 80))
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for (key, value) in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[(- 1)]
fullfilepath = ((systemparams['workingdir'] + systemparams['pathseparator']) + filename)
download_file(url, fullfilepath, sourceiss3bucket)
print(('Running script -- ' + script['ScriptSource']))
print('Sending parameters --')
for (key, value) in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join((("%s='%s'" % (key, val)) for (key, val) in script['Parameters'].iteritems()))
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
if (result != 0):
message = 'Encountered an unrecoverable error executing a content script. Exiting with failure.\nCommand executed: {0}'.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print(('-' * 80)) |
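For reference, a small sketch of how `main` above assembles the shell command for each content script. The parameter names are hypothetical, and the original code uses Python 2's `iteritems()` where this standalone snippet uses `items()`.

```python
# Hypothetical parameters, standing in for one script's 'Parameters' dict.
parameters = {'saltstates': 'Highstate', 'sourceiss3bucket': 'true'}
paramstring = ' '.join("%s='%s'" % (key, val) for key, val in parameters.items())
fullcommand = 'python {0} {1}'.format('/usr/tmp/systemprep/content.py', paramstring)
print(fullcommand)
# python /usr/tmp/systemprep/content.py saltstates='Highstate' sourceiss3bucket='true'
```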
def score(self, x, startprob, transmat):
'Log probability of the given data `x`.\n\n Attributes\n ----------\n x : ndarray\n Data to evaluate.\n %(_doc_default_callparams)s\n\n Returns\n -------\n log_prob : float\n The log probability of the data.\n\n '
log_transmat = np.log((transmat + np.finfo(float).eps))
log_startprob = np.log((startprob + np.finfo(float).eps))
log_prior = log_startprob[x[:, 0]]
n = x.shape[0]
nstates = log_startprob.shape[0]
logp = np.zeros(n)
for i in range(n):
njk = accum(np.vstack([x[i, 0:(- 1)], x[i, 1:]]).T, 1, size=(nstates, nstates), dtype=np.int32)
logp[i] = np.sum((njk * log_transmat))
return (logp + log_prior) | -3,877,954,204,768,832,500 | Log probability of the given data `x`.
Attributes
----------
x : ndarray
Data to evaluate.
%(_doc_default_callparams)s
Returns
-------
log_prob : float
The log probability of the data. | mlpy/stats/models/_basic.py | score | evenmarbles/mlpy | python | def score(self, x, startprob, transmat):
'Log probability of the given data `x`.\n\n Attributes\n ----------\n x : ndarray\n Data to evaluate.\n %(_doc_default_callparams)s\n\n Returns\n -------\n log_prob : float\n The log probability of the data.\n\n '
log_transmat = np.log((transmat + np.finfo(float).eps))
log_startprob = np.log((startprob + np.finfo(float).eps))
log_prior = log_startprob[x[:, 0]]
n = x.shape[0]
nstates = log_startprob.shape[0]
logp = np.zeros(n)
for i in range(n):
njk = accum(np.vstack([x[i, 0:(- 1)], x[i, 1:]]).T, 1, size=(nstates, nstates), dtype=np.int32)
logp[i] = np.sum((njk * log_transmat))
return (logp + log_prior) |
def sample(self, startprob, transmat, size=1):
'Sample from a Markov model.\n\n Attributes\n ----------\n size: int\n Defining number of sampled variates. Defaults to `1`.\n\n Returns\n -------\n vals: ndarray\n The sampled sequences of size (nseq, seqlen).\n\n '
if np.isscalar(size):
size = (1, size)
vals = np.zeros(size, dtype=np.int32)
(nseq, seqlen) = size
for i in range(nseq):
vals[i][0] = nonuniform.rvs(startprob)
for t in range(1, seqlen):
vals[i][t] = nonuniform.rvs(transmat[vals[i][(t - 1)]])
return vals | 4,241,172,080,405,026,000 | Sample from a Markov model.
Attributes
----------
size: int
Defining number of sampled variates. Defaults to `1`.
Returns
-------
vals: ndarray
The sampled sequences of size (nseq, seqlen). | mlpy/stats/models/_basic.py | sample | evenmarbles/mlpy | python | def sample(self, startprob, transmat, size=1):
'Sample from a Markov model.\n\n Attributes\n ----------\n size: int\n Defining number of sampled variates. Defaults to `1`.\n\n Returns\n -------\n vals: ndarray\n The sampled sequences of size (nseq, seqlen).\n\n '
if np.isscalar(size):
size = (1, size)
vals = np.zeros(size, dtype=np.int32)
(nseq, seqlen) = size
for i in range(nseq):
vals[i][0] = nonuniform.rvs(startprob)
for t in range(1, seqlen):
vals[i][t] = nonuniform.rvs(transmat[vals[i][(t - 1)]])
return vals |
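A self-contained sketch of the sampling loop in `sample` above, with `numpy.random.choice` standing in for the library's `nonuniform.rvs` (both draw one index from a discrete distribution); the start probabilities and transition matrix are made up.

```python
import numpy as np

startprob = np.array([0.6, 0.4])        # made-up start distribution
transmat = np.array([[0.9, 0.1],        # made-up 2-state transition matrix
                     [0.3, 0.7]])

nseq, seqlen = 3, 10
vals = np.zeros((nseq, seqlen), dtype=np.int32)
rng = np.random.default_rng(0)
for i in range(nseq):
    vals[i][0] = rng.choice(len(startprob), p=startprob)
    for t in range(1, seqlen):
        vals[i][t] = rng.choice(len(transmat), p=transmat[vals[i][t - 1]])
print(vals)  # each row is one sampled state sequence
```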
def fit(self, x):
'Fit a Markov model from data via MLE or MAP.\n\n Attributes\n ----------\n x : ndarray[int]\n Observed data\n\n Returns\n -------\n %(_doc_default_callparams)s\n\n '
nstates = nunique(x.ravel())
pi_pseudo_counts = np.ones(nstates)
transmat_pseudo_counts = np.ones((nstates, nstates))
n = x.shape[0]
startprob = ((normalize(np.bincount(x[:, 0])) + pi_pseudo_counts) - 1)
counts = np.zeros((nstates, nstates))
for i in range(n):
counts += accum(np.vstack([x[i, 0:(- 1)], x[i, 1:]]).T, 1, size=(nstates, nstates))
transmat = normalize(((counts + transmat_pseudo_counts) - 1), 1)
return (startprob, transmat) | -6,525,996,465,640,089,000 | Fit a Markov model from data via MLE or MAP.
Attributes
----------
x : ndarray[int]
Observed data
Returns
-------
%(_doc_default_callparams)s | mlpy/stats/models/_basic.py | fit | evenmarbles/mlpy | python | def fit(self, x):
'Fit a Markov model from data via MLE or MAP.\n\n Attributes\n ----------\n x : ndarray[int]\n Observed data\n\n Returns\n -------\n %(_doc_default_callparams)s\n\n '
nstates = nunique(x.ravel())
pi_pseudo_counts = np.ones(nstates)
transmat_pseudo_counts = np.ones((nstates, nstates))
n = x.shape[0]
startprob = ((normalize(np.bincount(x[:, 0])) + pi_pseudo_counts) - 1)
counts = np.zeros((nstates, nstates))
for i in range(n):
counts += accum(np.vstack([x[i, 0:(- 1)], x[i, 1:]]).T, 1, size=(nstates, nstates))
transmat = normalize(((counts + transmat_pseudo_counts) - 1), 1)
return (startprob, transmat) |
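The `fit` method above boils down to counting start states and state-to-state transitions; a hedged NumPy-only sketch of the same estimate follows. The library's `accum` helper is replaced by `np.add.at`, the observed sequences are made up, and the Dirichlet(1) pseudo-counts in the original cancel out and are omitted.

```python
import numpy as np

x = np.array([[0, 0, 1, 1, 2],   # made-up observed state sequences
              [1, 2, 2, 0, 0]])
nstates = len(np.unique(x))

# Start probabilities: normalized counts of each sequence's first state.
startprob = np.bincount(x[:, 0], minlength=nstates).astype(float)
startprob /= startprob.sum()

# Transition matrix: count (s_t -> s_{t+1}) pairs, then row-normalize.
counts = np.zeros((nstates, nstates))
np.add.at(counts, (x[:, :-1].ravel(), x[:, 1:].ravel()), 1)
transmat = counts / counts.sum(axis=1, keepdims=True)
print(startprob)
print(transmat)
```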
def __init__(self, startprob, transmat):
'Create a "frozen" Markov model.\n\n Parameters\n ----------\n startprob : array_like\n Start probabilities\n transmat : array_like\n Transition matrix\n\n '
self._model = markov_gen()
self.startprob = startprob
self.transmat = transmat | 2,096,330,718,006,145,000 | Create a "frozen" Markov model.
Parameters
----------
startprob : array_like
Start probabilities
transmat : array_like
Transition matrix | mlpy/stats/models/_basic.py | __init__ | evenmarbles/mlpy | python | def __init__(self, startprob, transmat):
'Create a "frozen" Markov model.\n\n Parameters\n ----------\n startprob : array_like\n Start probabilities\n transmat : array_like\n Transition matrix\n\n '
self._model = markov_gen()
self.startprob = startprob
self.transmat = transmat |
@classmethod
def getInputSpecification(cls):
'\n Method to get a reference to a class that specifies the input data for\n class cls.\n @ Out, inputSpecification, InputData.ParameterInput, class to use for\n specifying input of cls.\n '
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = 'TimeSeriesAnalysis algorithm for fitting data of degree one or greater.'
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType, descr='Specifies the degree polynomial to fit the data with.'))
return specs | 6,709,305,341,692,526,000 | Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls. | framework/TSA/PolynomialRegression.py | getInputSpecification | archmagethanos/raven | python | @classmethod
def getInputSpecification(cls):
'\n Method to get a reference to a class that specifies the input data for\n class cls.\n @ Out, inputSpecification, InputData.ParameterInput, class to use for\n specifying input of cls.\n '
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = 'TimeSeriesAnalysis algorithm for fitting data of degree one or greater.'
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType, descr='Specifies the degree polynomial to fit the data with.'))
return specs |
def __init__(self, *args, **kwargs):
'\n A constructor that will appropriately initialize a supervised learning object\n @ In, args, list, an arbitrary list of positional values\n @ In, kwargs, dict, an arbitrary dictionary of keywords and values\n @ Out, None\n '
super().__init__(*args, **kwargs) | 1,282,056,979,279,828,700 | A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None | framework/TSA/PolynomialRegression.py | __init__ | archmagethanos/raven | python | def __init__(self, *args, **kwargs):
'\n A constructor that will appropriately initialize a supervised learning object\n @ In, args, list, an arbitrary list of positional values\n @ In, kwargs, dict, an arbitrary dictionary of keywords and values\n @ Out, None\n '
super().__init__(*args, **kwargs) |
def handleInput(self, spec):
'\n Reads user inputs into this object.\n @ In, inp, InputData.InputParams, input specifications\n @ Out, settings, dict, initialization settings for this algorithm\n '
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings | 3,221,797,229,476,569,000 | Reads user inputs into this object.
@ In, inp, InputData.InputParams, input specifications
@ Out, settings, dict, initialization settings for this algorithm | framework/TSA/PolynomialRegression.py | handleInput | archmagethanos/raven | python | def handleInput(self, spec):
'\n Reads user inputs into this object.\n @ In, inp, InputData.InputParams, input specifications\n @ Out, settings, dict, initialization settings for this algorithm\n '
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings |
def characterize(self, signal, pivot, targets, settings):
'\n Determines the characteristics of the signal based on this algorithm.\n @ In, signal, np.ndarray, time series with dims [time, target]\n @ In, pivot, np.1darray, time-like parameter values\n @ In, targets, list(str), names of targets in same order as signal\n @ In, settings, dict, additional settings specific to this algorithm\n @ Out, params, dict, characteristic parameters\n '
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape((- 1), 1))
for target in targets:
results = sm.OLS(signal, xp).fit()
params[target]['model']['intercept'] = results.params[0]
for (i, value) in enumerate(results.params[1:]):
params[target]['model'][f'coef{(i + 1)}'] = value
params[target]['model']['object'] = results
return params | -1,975,003,618,679,932,400 | Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters | framework/TSA/PolynomialRegression.py | characterize | archmagethanos/raven | python | def characterize(self, signal, pivot, targets, settings):
'\n Determines the charactistics of the signal based on this algorithm.\n @ In, signal, np.ndarray, time series with dims [time, target]\n @ In, pivot, np.1darray, time-like parameter values\n @ In, targets, list(str), names of targets in same order as signal\n @ In, settings, dict, additional settings specific to this algorithm\n @ Out, params, dict, characteristic parameters\n '
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape((- 1), 1))
for target in targets:
results = sm.OLS(signal, xp).fit()
params[target]['model']['intercept'] = results.params[0]
for (i, value) in enumerate(results.params[1:]):
params[target]['model'][f'coef{(i + 1)}'] = value
params[target]['model']['object'] = results
return params |
def getParamNames(self, settings):
'\n Return list of expected variable names based on the parameters\n @ In, settings, dict, training parameters for this algorithm\n @ Out, names, list, string list of names\n '
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1, settings['degree']):
names.append(f'{base}__coef{i}')
return names | 1,502,671,060,607,642,000 | Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names | framework/TSA/PolynomialRegression.py | getParamNames | archmagethanos/raven | python | def getParamNames(self, settings):
'\n Return list of expected variable names based on the parameters\n @ In, settings, dict, training parameters for this algorithm\n @ Out, names, list, string list of names\n '
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1, settings['degree']):
names.append(f'{base}__coef{i}')
return names |
def getParamsAsVars(self, params):
'\n Map characterization parameters into flattened variable format\n @ In, params, dict, trained parameters (as from characterize)\n @ Out, rlz, dict, realization-style response\n '
rlz = {}
for (target, info) in params.items():
base = f'{self.name}__{target}'
for (name, value) in info['model'].items():
if (name == 'object'):
continue
rlz[f'{base}__{name}'] = value
return rlz | 5,117,106,150,454,316,000 | Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response | framework/TSA/PolynomialRegression.py | getParamsAsVars | archmagethanos/raven | python | def getParamsAsVars(self, params):
'\n Map characterization parameters into flattened variable format\n @ In, params, dict, trained parameters (as from characterize)\n @ Out, rlz, dict, realization-style response\n '
rlz = {}
for (target, info) in params.items():
base = f'{self.name}__{target}'
for (name, value) in info['model'].items():
if (name == 'object'):
continue
rlz[f'{base}__{name}'] = value
return rlz |
def generate(self, params, pivot, settings):
'\n Generates a synthetic history from fitted parameters.\n @ In, params, dict, characterization such as obtained from self.characterize()\n @ In, pivot, np.array(float), pivot parameter values\n @ In, settings, dict, additional settings specific to algorithm\n @ Out, synthetic, np.array(float), synthetic estimated model signal\n '
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape((- 1), 1))
for (t, (target, _)) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic | 1,532,766,129,109,472,000 | Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal | framework/TSA/PolynomialRegression.py | generate | archmagethanos/raven | python | def generate(self, params, pivot, settings):
'\n Generates a synthetic history from fitted parameters.\n @ In, params, dict, characterization such as obtained from self.characterize()\n @ In, pivot, np.array(float), pivot parameter values\n @ In, settings, dict, additional settings specific to algorithm\n @ Out, synthetic, np.array(float), synthetic estimated model signal\n '
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape((- 1), 1))
for (t, (target, _)) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic |
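A compact, hedged sketch of the fit-then-predict pattern that `characterize` and `generate` above rely on: `PolynomialFeatures` expands the pivot into [1, t, t^2, ...] columns and statsmodels OLS fits the coefficients. The data here are synthetic.

```python
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm

pivot = np.linspace(0.0, 10.0, 50)                    # made-up time axis
signal = 2.0 + 0.5 * pivot - 0.03 * pivot ** 2        # made-up target signal
signal = signal + np.random.default_rng(0).normal(0.0, 0.05, pivot.size)

xp = PolynomialFeatures(degree=2).fit_transform(pivot.reshape(-1, 1))
results = sm.OLS(signal, xp).fit()
print(results.params)            # [intercept, coef1, coef2], roughly [2.0, 0.5, -0.03]
synthetic = results.predict(xp)  # what generate() evaluates for each target
```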
def writeXML(self, writeTo, params):
'\n Allows the engine to put whatever it wants into an XML to print to file.\n @ In, writeTo, xmlUtils.StaticXmlElement, entity to write to\n @ In, params, dict, trained parameters as from self.characterize\n @ Out, None\n '
for (target, info) in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for (name, value) in info['model'].items():
if (name == 'object'):
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}')) | 1,898,250,700,010,009,300 | Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None | framework/TSA/PolynomialRegression.py | writeXML | archmagethanos/raven | python | def writeXML(self, writeTo, params):
'\n Allows the engine to put whatever it wants into an XML to print to file.\n @ In, writeTo, xmlUtils.StaticXmlElement, entity to write to\n @ In, params, dict, trained parameters as from self.characterize\n @ Out, None\n '
for (target, info) in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for (name, value) in info['model'].items():
if (name == 'object'):
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}')) |
@pytest.fixture
def client(request):
'\n Defines the client object to make requests against\n '
(db_fd, app.APP.config['DATABASE']) = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
'\n Close the client once testing has completed\n '
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client | -2,865,272,458,671,341,000 | Defines the client object to make requests against | tests/test_rest_file_region.py | client | Multiscale-Genomics/mg-rest-file | python | @pytest.fixture
def client(request):
'\n \n '
(db_fd, app.APP.config['DATABASE']) = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
'\n Close the client once testing has completed\n '
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client |
def test_region_meta(client):
'\n Test that the track endpoint is returning the usage parameters\n '
rest_value = client.get('/mug/api/dmp/file/whole', headers=dict(Authorization='Authorization: Bearer teststring'))
details = json.loads(rest_value.data)
assert ('usage' in details) | 3,116,075,016,171,612,700 | Test that the track endpoint is returning the usage parameters | tests/test_rest_file_region.py | test_region_meta | Multiscale-Genomics/mg-rest-file | python | def test_region_meta(client):
'\n \n '
rest_value = client.get('/mug/api/dmp/file/whole', headers=dict(Authorization='Authorization: Bearer teststring'))
details = json.loads(rest_value.data)
assert ('usage' in details) |
def test_region_file(client):
'\n Test that the track endpoint is returning the usage parameters\n '
rest_value = client.get('/mug/api/dmp/file/region?file_id=testtest0000&chrom=19&start=3000000&end=3100000', headers=dict(Authorization='Authorization: Bearer teststring'))
assert (len(rest_value.data) > 0) | 5,789,364,442,130,801,000 | Test that the track endpoint is returning the usage parameters | tests/test_rest_file_region.py | test_region_file | Multiscale-Genomics/mg-rest-file | python | def test_region_file(client):
'\n \n '
rest_value = client.get('/mug/api/dmp/file/region?file_id=testtest0000&chrom=19&start=3000000&end=3100000', headers=dict(Authorization='Authorization: Bearer teststring'))
assert (len(rest_value.data) > 0) |
def teardown():
'\n Close the client once testing has completed\n '
os.close(db_fd)
os.unlink(app.APP.config['DATABASE']) | -8,832,247,154,748,192,000 | Close the client once testing has completed | tests/test_rest_file_region.py | teardown | Multiscale-Genomics/mg-rest-file | python | def teardown():
'\n \n '
os.close(db_fd)
os.unlink(app.APP.config['DATABASE']) |
def build_model(model_name, num_features, num_classes):
'Hyper-parameters are determined by auto training, refer to grb.utils.trainer.AutoTrainer.'
if (model_name in ['gcn', 'gcn_ln', 'gcn_at', 'gcn_ln_at']):
from grb.model.torch import GCN
model = GCN(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, layer_norm=(True if ('ln' in model_name) else False), dropout=0.7)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['graphsage', 'graphsage_ln', 'graphsage_at', 'graphsage_ln_at']):
from grb.model.torch import GraphSAGE
model = GraphSAGE(in_features=num_features, out_features=num_classes, hidden_features=256, n_layers=5, layer_norm=(True if ('ln' in model_name) else False), dropout=0.5)
train_params = {'lr': 0.0001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['sgcn', 'sgcn_ln', 'sgcn_at', 'sgcn_ln_at']):
from grb.model.torch import SGCN
model = SGCN(in_features=num_features, out_features=num_classes, hidden_features=256, n_layers=4, k=4, layer_norm=(True if ('ln' in model_name) else False), dropout=0.5)
train_params = {'lr': 0.01, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['tagcn', 'tagcn_ln', 'tagcn_at', 'tagcn_ln_at']):
from grb.model.torch import TAGCN
model = TAGCN(in_features=num_features, out_features=num_classes, hidden_features=256, n_layers=3, k=2, layer_norm=(True if ('ln' in model_name) else False), dropout=0.5)
train_params = {'lr': 0.005, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['appnp', 'appnp_ln', 'appnp_at', 'appnp_ln_at']):
from grb.model.torch import APPNP
model = APPNP(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, k=3, layer_norm=(True if ('ln' in model_name) else False), dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gin', 'gin_ln', 'gin_at', 'gin_ln_at']):
from grb.model.torch import GIN
model = GIN(in_features=num_features, out_features=num_classes, hidden_features=256, n_layers=2, layer_norm=(True if ('ln' in model_name) else False), dropout=0.6)
train_params = {'lr': 0.0001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gat', 'gat_ln', 'gat_at', 'gat_ln_at']):
from grb.model.dgl import GAT
model = GAT(in_features=num_features, out_features=num_classes, hidden_features=64, n_layers=3, n_heads=6, layer_norm=(True if ('ln' in model_name) else False), dropout=0.6)
train_params = {'lr': 0.005, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['robustgcn', 'robustgcn_at']):
from grb.defense import RobustGCN
model = RobustGCN(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gcnsvd', 'gcnsvd_ln']):
from grb.defense.gcnsvd import GCNSVD
model = GCNSVD(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gcnguard']):
from grb.defense import GCNGuard
model = GCNGuard(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gatguard']):
from grb.defense import GATGuard
model = GATGuard(in_features=num_features, out_features=num_classes, hidden_features=64, n_heads=6, n_layers=3, dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params) | 6,690,898,841,103,739,000 | Hyper-parameters are determined by auto training, refer to grb.utils.trainer.AutoTrainer. | pipeline/configs/grb-citeseer/config.py | build_model | sigeisler/grb | python | def build_model(model_name, num_features, num_classes):
if (model_name in ['gcn', 'gcn_ln', 'gcn_at', 'gcn_ln_at']):
from grb.model.torch import GCN
model = GCN(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, layer_norm=(True if ('ln' in model_name) else False), dropout=0.7)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['graphsage', 'graphsage_ln', 'graphsage_at', 'graphsage_ln_at']):
from grb.model.torch import GraphSAGE
model = GraphSAGE(in_features=num_features, out_features=num_classes, hidden_features=256, n_layers=5, layer_norm=(True if ('ln' in model_name) else False), dropout=0.5)
train_params = {'lr': 0.0001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['sgcn', 'sgcn_ln', 'sgcn_at', 'sgcn_ln_at']):
from grb.model.torch import SGCN
model = SGCN(in_features=num_features, out_features=num_classes, hidden_features=256, n_layers=4, k=4, layer_norm=(True if ('ln' in model_name) else False), dropout=0.5)
train_params = {'lr': 0.01, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['tagcn', 'tagcn_ln', 'tagcn_at', 'tagcn_ln_at']):
from grb.model.torch import TAGCN
model = TAGCN(in_features=num_features, out_features=num_classes, hidden_features=256, n_layers=3, k=2, layer_norm=(True if ('ln' in model_name) else False), dropout=0.5)
train_params = {'lr': 0.005, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['appnp', 'appnp_ln', 'appnp_at', 'appnp_ln_at']):
from grb.model.torch import APPNP
model = APPNP(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, k=3, layer_norm=(True if ('ln' in model_name) else False), dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gin', 'gin_ln', 'gin_at', 'gin_ln_at']):
from grb.model.torch import GIN
model = GIN(in_features=num_features, out_features=num_classes, hidden_features=256, n_layers=2, layer_norm=(True if ('ln' in model_name) else False), dropout=0.6)
train_params = {'lr': 0.0001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gat', 'gat_ln', 'gat_at', 'gat_ln_at']):
from grb.model.dgl import GAT
model = GAT(in_features=num_features, out_features=num_classes, hidden_features=64, n_layers=3, n_heads=6, layer_norm=(True if ('ln' in model_name) else False), dropout=0.6)
train_params = {'lr': 0.005, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['robustgcn', 'robustgcn_at']):
from grb.defense import RobustGCN
model = RobustGCN(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gcnsvd', 'gcnsvd_ln']):
from grb.defense.gcnsvd import GCNSVD
model = GCNSVD(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gcnguard']):
from grb.defense import GCNGuard
model = GCNGuard(in_features=num_features, out_features=num_classes, hidden_features=128, n_layers=3, dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params)
if (model_name in ['gatguard']):
from grb.defense import GATGuard
model = GATGuard(in_features=num_features, out_features=num_classes, hidden_features=64, n_heads=6, n_layers=3, dropout=0.5)
train_params = {'lr': 0.001, 'n_epoch': 5000, 'early_stop': True, 'early_stop_patience': 500, 'train_mode': 'inductive'}
return (model, train_params) |
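A brief usage sketch of the `build_model` dispatcher above; the feature and class counts are placeholders (in practice they come from the loaded grb dataset object), and running it requires the `grb` package.

```python
# Placeholder sizes -- substitute the real dataset's num_features / num_classes.
num_features, num_classes = 300, 6
model, train_params = build_model('gcn_ln', num_features, num_classes)
print(model)                     # GCN with layer norm, 3 layers, 128 hidden units
print(train_params['lr'], train_params['n_epoch'])
```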
def wrap(lower, upper, x):
'\n Circularly alias the numeric value x into the range [lower,upper).\n\n Valid for cyclic quantities like orientations or hues.\n '
axis_range = (upper - lower)
return (lower + (((x - lower) + ((2.0 * axis_range) * (1.0 - math.floor((x / (2.0 * axis_range)))))) % axis_range)) | 630,713,999,419,601,900 | Circularly alias the numeric value x into the range [lower,upper).
Valid for cyclic quantities like orientations or hues. | featuremapper/distribution.py | wrap | fcr/featuremapper | python | def wrap(lower, upper, x):
'\n Circularly alias the numeric value x into the range [lower,upper).\n\n Valid for cyclic quantities like orientations or hues.\n '
axis_range = (upper - lower)
return (lower + (((x - lower) + ((2.0 * axis_range) * (1.0 - math.floor((x / (2.0 * axis_range)))))) % axis_range)) |
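Two worked values for `wrap` above, which aliases a number into [lower, upper) for cyclic quantities such as orientation. The function body is repeated here only so the snippet runs standalone.

```python
import math

def wrap(lower, upper, x):
    # Same formula as wrap() above.
    axis_range = upper - lower
    return lower + ((x - lower) + 2.0 * axis_range * (1.0 - math.floor(x / (2.0 * axis_range)))) % axis_range

print(wrap(0.0, math.pi, 3.5))   # ~0.3584, i.e. 3.5 - pi
print(wrap(0.0, math.pi, -0.2))  # ~2.9416, i.e. -0.2 + pi
```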
def calc_theta(bins, axis_range):
'\n Convert a bin number to a direction in radians.\n\n Works for NumPy arrays of bin numbers, returning\n an array of directions.\n '
return np.exp(((((2.0 * np.pi) * bins) / axis_range) * 1j)) | 4,120,604,362,412,400,000 | Convert a bin number to a direction in radians.
Works for NumPy arrays of bin numbers, returning
an array of directions. | featuremapper/distribution.py | calc_theta | fcr/featuremapper | python | def calc_theta(bins, axis_range):
'\n Convert a bin number to a direction in radians.\n\n Works for NumPy arrays of bin numbers, returning\n an array of directions.\n '
return np.exp(((((2.0 * np.pi) * bins) / axis_range) * 1j)) |
def data(self):
'\n Answer a dictionary with bins as keys.\n '
return self._data | -4,481,491,216,808,856,600 | Answer a dictionary with bins as keys. | featuremapper/distribution.py | data | fcr/featuremapper | python | def data(self):
'\n \n '
return self._data |
def pop(self, feature_bin):
'\n Remove the entry with bin from the distribution.\n '
if (self._pop_store is not None):
raise Exception('Distribution: attempt to pop value before outstanding restore')
self._pop_store = self._data.pop(feature_bin)
self._keys = list(self._data.keys())
self._values = list(self._data.values()) | -4,856,610,280,572,698,000 | Remove the entry with bin from the distribution. | featuremapper/distribution.py | pop | fcr/featuremapper | python | def pop(self, feature_bin):
'\n \n '
if (self._pop_store is not None):
raise Exception('Distribution: attempt to pop value before outstanding restore')
self._pop_store = self._data.pop(feature_bin)
self._keys = list(self._data.keys())
self._values = list(self._data.values()) |
def restore(self, feature_bin):
'\n Restore the entry with bin from the distribution.\n Only valid if called after a pop.\n '
if (self._pop_store is None):
raise Exception('Distribution: attempt to restore value before pop')
self._data[feature_bin] = self._pop_store
self._pop_store = None
self._keys = list(self._data.keys())
self._values = list(self._data.values()) | 1,775,950,284,836,956,400 | Restore the entry with bin from the distribution.
Only valid if called after a pop. | featuremapper/distribution.py | restore | fcr/featuremapper | python | def restore(self, feature_bin):
'\n Restore the entry with bin from the distribution.\n Only valid if called after a pop.\n '
if (self._pop_store is None):
raise Exception('Distribution: attempt to restore value before pop')
self._data[feature_bin] = self._pop_store
self._pop_store = None
self._keys = list(self._data.keys())
self._values = list(self._data.values()) |
def vector_sum(self):
'\n Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).\n\n Each feature_bin contributes a vector of length equal to its value, at\n a direction corresponding to the feature_bin number. Specifically,\n the total feature_bin number range is mapped into a direction range\n [0,2pi].\n\n For a cyclic distribution, the avgbinnum will be a continuous\n measure analogous to the max_value_bin() of the distribution.\n But this quantity has more precision than max_value_bin()\n because it is computed from the entire distribution instead of\n just the peak feature_bin. However, it is likely to be useful only\n for uniform or very dense sampling; with sparse, non-uniform\n sampling the estimates will be biased significantly by the\n particular samples chosen.\n\n The avgbinnum is not meaningful when the magnitude is 0,\n because a zero-length vector has no direction. To find out\n whether such cases occurred, you can compare the value of\n undefined_vals before and after a series of calls to this\n function.\n \n This tries to use cached values of this.\n\n '
if (self._vector_sum is None):
if (self._theta is None):
self._theta = calc_theta(np.array(self._keys), self.axis_range)
self._vector_sum = self._fast_vector_sum(self._values, self._theta)
return self._vector_sum | 6,490,668,294,525,124,000 | Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
Each feature_bin contributes a vector of length equal to its value, at
a direction corresponding to the feature_bin number. Specifically,
the total feature_bin number range is mapped into a direction range
[0,2pi].
For a cyclic distribution, the avgbinnum will be a continuous
measure analogous to the max_value_bin() of the distribution.
But this quantity has more precision than max_value_bin()
because it is computed from the entire distribution instead of
just the peak feature_bin. However, it is likely to be useful only
for uniform or very dense sampling; with sparse, non-uniform
sampling the estimates will be biased significantly by the
particular samples chosen.
The avgbinnum is not meaningful when the magnitude is 0,
because a zero-length vector has no direction. To find out
whether such cases occurred, you can compare the value of
undefined_vals before and after a series of calls to this
function.
This tries to use cached values of this. | featuremapper/distribution.py | vector_sum | fcr/featuremapper | python | def vector_sum(self):
'\n Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).\n\n Each feature_bin contributes a vector of length equal to its value, at\n a direction corresponding to the feature_bin number. Specifically,\n the total feature_bin number range is mapped into a direction range\n [0,2pi].\n\n For a cyclic distribution, the avgbinnum will be a continuous\n measure analogous to the max_value_bin() of the distribution.\n But this quantity has more precision than max_value_bin()\n because it is computed from the entire distribution instead of\n just the peak feature_bin. However, it is likely to be useful only\n for uniform or very dense sampling; with sparse, non-uniform\n sampling the estimates will be biased significantly by the\n particular samples chosen.\n\n The avgbinnum is not meaningful when the magnitude is 0,\n because a zero-length vector has no direction. To find out\n whether such cases occurred, you can compare the value of\n undefined_vals before and after a series of calls to this\n function.\n \n This tries to use cached values of this.\n\n '
if (self._vector_sum is None):
if (self._theta is None):
self._theta = calc_theta(np.array(self._keys), self.axis_range)
self._vector_sum = self._fast_vector_sum(self._values, self._theta)
return self._vector_sum |
def _fast_vector_sum(self, values, theta):
'\n Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).\n \n This implementation assumes that the values of the distribution needed for the \n vector sum will not be changed and depends on cached values.\n \n '
v_sum = np.inner(values, theta)
magnitude = abs(v_sum)
direction = cmath.phase(v_sum)
if (v_sum == 0):
self.undefined_vals += 1
direction_radians = self._radians_to_bins(direction)
wrapped_direction = wrap(self.axis_bounds[0], self.axis_bounds[1], direction_radians)
return (magnitude, wrapped_direction) | -1,804,977,039,230,122,000 | Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
This implementation assumes that the values of the distribution needed for the
vector sum will not be changed and depends on cached values. | featuremapper/distribution.py | _fast_vector_sum | fcr/featuremapper | python | def _fast_vector_sum(self, values, theta):
'\n Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).\n \n This implementation assumes that the values of the distribution needed for the \n vector sum will not be changed and depends on cached values.\n \n '
v_sum = np.inner(values, theta)
magnitude = abs(v_sum)
direction = cmath.phase(v_sum)
if (v_sum == 0):
self.undefined_vals += 1
direction_radians = self._radians_to_bins(direction)
wrapped_direction = wrap(self.axis_bounds[0], self.axis_bounds[1], direction_radians)
return (magnitude, wrapped_direction) |
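A hedged NumPy sketch of the cyclic vector sum computed by `vector_sum`/`_fast_vector_sum` above: each bin is mapped to a unit direction on the circle (as in `calc_theta`), weighted by its value, and the phase and magnitude of the complex sum give the preferred bin and its strength. The bins and values here are made up (e.g. orientation in degrees over a 180-degree range).

```python
import cmath
import numpy as np

axis_range = 180.0                        # made-up cyclic axis (orientation, degrees)
bins = np.array([0.0, 45.0, 90.0, 135.0])
values = np.array([1.0, 4.0, 1.0, 0.5])   # made-up histogram values

theta = np.exp(2.0 * np.pi * bins / axis_range * 1j)  # same mapping as calc_theta()
v_sum = np.inner(values, theta)
magnitude = abs(v_sum)
direction_bin = cmath.phase(v_sum) * axis_range / (2.0 * np.pi) % axis_range
print(magnitude, direction_bin)  # ~3.5, ~45.0 -- pulled toward the heavily weighted bin
```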
def get_value(self, feature_bin):
'\n Return the value of the specified feature_bin.\n\n (Return None if there is no such feature_bin.)\n '
return self._data.get(feature_bin) | -3,871,729,423,209,276,400 | Return the value of the specified feature_bin.
(Return None if there is no such feature_bin.) | featuremapper/distribution.py | get_value | fcr/featuremapper | python | def get_value(self, feature_bin):
'\n Return the value of the specified feature_bin.\n\n (Return None if there is no such feature_bin.)\n '
return self._data.get(feature_bin) |
def get_count(self, feature_bin):
'\n Return the count from the specified feature_bin.\n\n (Return None if there is no such feature_bin.)\n '
return self._counts.get(feature_bin) | 3,929,430,799,493,367,000 | Return the count from the specified feature_bin.
(Return None if there is no such feature_bin.) | featuremapper/distribution.py | get_count | fcr/featuremapper | python | def get_count(self, feature_bin):
'\n Return the count from the specified feature_bin.\n\n (Return None if there is no such feature_bin.)\n '
return self._counts.get(feature_bin) |
def values(self):
'\n Return a list of values.\n\n Various statistics can then be calculated if desired:\n\n sum(vals) (total of all values)\n max(vals) (highest value in any feature_bin)\n\n Note that the feature_bin-order of values returned does not necessarily\n match that returned by counts().\n '
return self._values | 7,122,961,979,136,912,000 | Return a list of values.
Various statistics can then be calculated if desired:
sum(vals) (total of all values)
max(vals) (highest value in any feature_bin)
Note that the feature_bin-order of values returned does not necessarily
match that returned by counts(). | featuremapper/distribution.py | values | fcr/featuremapper | python | def values(self):
'\n Return a list of values.\n\n Various statistics can then be calculated if desired:\n\n sum(vals) (total of all values)\n max(vals) (highest value in any feature_bin)\n\n Note that the feature_bin-order of values returned does not necessarily\n match that returned by counts().\n '
return self._values |
def counts(self):
'\n Return a list of values.\n\n Various statistics can then be calculated if desired:\n\n sum(counts) (total of all counts)\n max(counts) (highest count in any feature_bin)\n\n Note that the feature_bin-order of values returned does not necessarily\n match that returned by values().\n '
return list(self._counts.values()) | -577,602,815,931,901,200 | Return a list of values.
Various statistics can then be calculated if desired:
sum(counts) (total of all counts)
max(counts) (highest count in any feature_bin)
Note that the feature_bin-order of values returned does not necessarily
match that returned by values(). | featuremapper/distribution.py | counts | fcr/featuremapper | python | def counts(self):
'\n Return a list of values.\n\n Various statistics can then be calculated if desired:\n\n sum(counts) (total of all counts)\n max(counts) (highest count in any feature_bin)\n\n Note that the feature_bin-order of values returned does not necessarily\n match that returned by values().\n '
return list(self._counts.values()) |
def bins(self):
'\n Return a list of bins that have been populated.\n '
return self._keys | 8,935,579,620,609,360,000 | Return a list of bins that have been populated. | featuremapper/distribution.py | bins | fcr/featuremapper | python | def bins(self):
'\n \n '
return self._keys |
def sub_distr(self, distr):
'\n Subtract the given distribution from the current one.\n Only existing bins are modified, new bins in the given\n distribution are discarded without raising errors.\n\n Note that total_value and total_count are not affected, and\n keep_peak is ignored, therefore analysis relying on these\n values should not call this method.\n '
for b in distr.bins():
if (b in self.bins()):
v = distr._data.get(b)
if (v is not None):
self._data[b] -= v | 786,399,384,776,559,700 | Subtract the given distribution from the current one.
Only existing bins are modified, new bins in the given
distribution are discarded without raising errors.
Note that total_value and total_count are not affected, and
keep_peak is ignored, therefore analysis relying on these
values should not call this method. | featuremapper/distribution.py | sub_distr | fcr/featuremapper | python | def sub_distr(self, distr):
'\n Subtract the given distribution from the current one.\n Only existing bins are modified, new bins in the given\n distribution are discarded without raising errors.\n\n Note that total_value and total_count are not affected, and\n keep_peak is ignored, therefore analysis relying on these\n values should not call this method.\n '
for b in distr.bins():
if (b in self.bins()):
v = distr._data.get(b)
if (v is not None):
self._data[b] -= v |
def max_value_bin(self):
'\n Return the feature_bin with the largest value.\n \n Note that uses cached values so that pop and restore\n need to be used if want with altered distribution.\n '
return self._keys[np.argmax(self._values)] | 6,829,382,467,599,951,000 | Return the feature_bin with the largest value.
Note that uses cached values so that pop and restore
need to be used if want with altered distribution. | featuremapper/distribution.py | max_value_bin | fcr/featuremapper | python | def max_value_bin(self):
'\n Return the feature_bin with the largest value.\n \n Note that uses cached values so that pop and restore\n need to be used if want with altered distribution.\n '
return self._keys[np.argmax(self._values)] |
def weighted_sum(self):
'Return the sum of each value times its feature_bin.'
return np.inner(self._keys, self._values) | 3,106,585,788,290,091,500 | Return the sum of each value times its feature_bin. | featuremapper/distribution.py | weighted_sum | fcr/featuremapper | python | def weighted_sum(self):
return np.inner(self._keys, self._values) |
def value_mag(self, feature_bin):
'Return the value of a single feature_bin as a proportion of total_value.'
return self._safe_divide(self._data.get(feature_bin), self.total_value) | 3,479,951,534,854,595,600 | Return the value of a single feature_bin as a proportion of total_value. | featuremapper/distribution.py | value_mag | fcr/featuremapper | python | def value_mag(self, feature_bin):
return self._safe_divide(self._data.get(feature_bin), self.total_value) |
def count_mag(self, feature_bin):
'Return the count of a single feature_bin as a proportion of total_count.'
return self._safe_divide(float(self._counts.get(feature_bin)), float(self.total_count)) | -1,181,326,390,964,662,500 | Return the count of a single feature_bin as a proportion of total_count. | featuremapper/distribution.py | count_mag | fcr/featuremapper | python | def count_mag(self, feature_bin):
return self._safe_divide(float(self._counts.get(feature_bin)), float(self.total_count)) |
def _bins_to_radians(self, bin):
'\n Convert a bin number to a direction in radians.\n\n Works for NumPy arrays of bin numbers, returning\n an array of directions.\n '
return (((2 * np.pi) * bin) / self.axis_range) | 2,133,827,564,899,951,900 | Convert a bin number to a direction in radians.
Works for NumPy arrays of bin numbers, returning
an array of directions. | featuremapper/distribution.py | _bins_to_radians | fcr/featuremapper | python | def _bins_to_radians(self, bin):
'\n Convert a bin number to a direction in radians.\n\n Works for NumPy arrays of bin numbers, returning\n an array of directions.\n '
return (((2 * np.pi) * bin) / self.axis_range) |
def _radians_to_bins(self, direction):
'\n Convert a direction in radians into a feature_bin number.\n\n Works for NumPy arrays of direction, returning\n an array of feature_bin numbers.\n '
return ((direction * self.axis_range) / (2 * np.pi)) | -3,051,043,271,045,395,500 | Convert a direction in radians into a feature_bin number.
Works for NumPy arrays of direction, returning
an array of feature_bin numbers. | featuremapper/distribution.py | _radians_to_bins | fcr/featuremapper | python | def _radians_to_bins(self, direction):
'\n Convert a direction in radians into a feature_bin number.\n\n Works for NumPy arrays of direction, returning\n an array of feature_bin numbers.\n '
return ((direction * self.axis_range) / (2 * np.pi)) |
def _safe_divide(self, numerator, denominator):
'\n Division routine that avoids division-by-zero errors\n (returning zero in such cases) but keeps track of them\n for undefined_values().\n '
if (denominator == 0):
self.undefined_vals += 1
return 0
else:
return (numerator / denominator) | -4,667,346,801,391,625,000 | Division routine that avoids division-by-zero errors
(returning zero in such cases) but keeps track of them
for undefined_values(). | featuremapper/distribution.py | _safe_divide | fcr/featuremapper | python | def _safe_divide(self, numerator, denominator):
'\n Division routine that avoids division-by-zero errors\n (returning zero in such cases) but keeps track of them\n for undefined_values().\n '
if (denominator == 0):
self.undefined_vals += 1
return 0
else:
return (numerator / denominator) |
def __call__(self, distribution):
'\n Apply the distribution statistic function; must be implemented by subclasses.\n\n Subclasses should be called with a Distribution as argument, return will be a\n dictionary, with Pref objects as values\n '
raise NotImplementedError | -1,237,814,911,082,698,200 | Apply the distribution statistic function; must be implemented by subclasses.
Subclasses should be called with a Distribution as argument, return will be a
dictionary, with Pref objects as values | featuremapper/distribution.py | __call__ | fcr/featuremapper | python | def __call__(self, distribution):
'\n Apply the distribution statistic function; must be implemented by subclasses.\n\n Subclasses should be called with a Distribution as argument, return will be a\n dictionary, with Pref objects as values\n '
raise NotImplementedError |
def vector_sum(self, d):
'\n Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).\n\n Each bin contributes a vector of length equal to its value, at\n a direction corresponding to the bin number. Specifically,\n the total bin number range is mapped into a direction range\n [0,2pi].\n\n For a cyclic distribution, the avgbinnum will be a continuous\n measure analogous to the max_value_bin() of the distribution.\n But this quantity has more precision than max_value_bin()\n because it is computed from the entire distribution instead of\n just the peak bin. However, it is likely to be useful only\n for uniform or very dense sampling; with sparse, non-uniform\n sampling the estimates will be biased significantly by the\n particular samples chosen.\n\n The avgbinnum is not meaningful when the magnitude is 0,\n because a zero-length vector has no direction. To find out\n whether such cases occurred, you can compare the value of\n undefined_vals before and after a series of calls to this\n function.\n \n This is a slow algorithm and should only be used if the\n contents of the distribution have been changed by the statistical \n function.\n If not, then the cached value in the distribution should be used.\n\n '
h = d.data()
theta = calc_theta(np.array(list(h.keys())), d.axis_range)
return d._fast_vector_sum(list(h.values()), theta) | 6,010,283,714,036,825,000 | Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
Each bin contributes a vector of length equal to its value, at
a direction corresponding to the bin number. Specifically,
the total bin number range is mapped into a direction range
[0,2pi].
For a cyclic distribution, the avgbinnum will be a continuous
measure analogous to the max_value_bin() of the distribution.
But this quantity has more precision than max_value_bin()
because it is computed from the entire distribution instead of
just the peak bin. However, it is likely to be useful only
for uniform or very dense sampling; with sparse, non-uniform
sampling the estimates will be biased significantly by the
particular samples chosen.
The avgbinnum is not meaningful when the magnitude is 0,
because a zero-length vector has no direction. To find out
whether such cases occurred, you can compare the value of
undefined_vals before and after a series of calls to this
function.
This is a slow algorithm and should only be used if the
contents of the distribution have been changed by the statistical
function.
If not, then the cached value in the distribution should be used. | featuremapper/distribution.py | vector_sum | fcr/featuremapper | python | def vector_sum(self, d):
'\n Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).\n\n Each bin contributes a vector of length equal to its value, at\n a direction corresponding to the bin number. Specifically,\n the total bin number range is mapped into a direction range\n [0,2pi].\n\n For a cyclic distribution, the avgbinnum will be a continuous\n measure analogous to the max_value_bin() of the distribution.\n But this quantity has more precision than max_value_bin()\n because it is computed from the entire distribution instead of\n just the peak bin. However, it is likely to be useful only\n for uniform or very dense sampling; with sparse, non-uniform\n sampling the estimates will be biased significantly by the\n particular samples chosen.\n\n The avgbinnum is not meaningful when the magnitude is 0,\n because a zero-length vector has no direction. To find out\n whether such cases occurred, you can compare the value of\n undefined_vals before and after a series of calls to this\n function.\n \n This is a slow algorithm and should only be used if the\n contents of the distribution have been changed by the statistical \n function.\n If not, then the cached value in the distribution should be used.\n\n '
h = d.data()
theta = calc_theta(np.array(list(h.keys())), d.axis_range)
return d._fast_vector_sum(list(h.values()), theta) |
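The vector-sum description above can be illustrated with a small standalone NumPy sketch (this is not the repository's _fast_vector_sum; the bin positions, values, and axis range below are made up for the example):

import numpy as np

def vector_sum_sketch(bins, values, axis_range):
    # Map each bin position onto the full circle [0, 2*pi).
    theta = 2 * np.pi * np.asarray(bins, dtype=float) / axis_range
    values = np.asarray(values, dtype=float)
    # Each bin contributes a vector of length value() at direction theta.
    x = np.sum(values * np.cos(theta))
    y = np.sum(values * np.sin(theta))
    magnitude = np.hypot(x, y)
    # Average direction mapped back into bin coordinates (undefined when magnitude is 0).
    avgbinnum = (np.arctan2(y, x) % (2 * np.pi)) * axis_range / (2 * np.pi)
    return magnitude, avgbinnum

# Example: a pi-periodic orientation histogram peaking near 0.79 rad.
print(vector_sum_sketch([0.0, 0.79, 1.57, 2.36], [0.1, 0.8, 0.2, 0.1], np.pi))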
def _weighted_average(self, d):
'\n Return the weighted_sum divided by the sum of the values\n '
return d._safe_divide(d.weighted_sum(), sum(d.values())) | -7,139,313,861,122,571,000 | Return the weighted_sum divided by the sum of the values | featuremapper/distribution.py | _weighted_average | fcr/featuremapper | python | def _weighted_average(self, d):
'\n \n '
return d._safe_divide(d.weighted_sum(), sum(d.values())) |
def selectivity(self, d):
'\n Return a measure of the peakedness of the distribution. The\n calculation differs depending on whether this is a cyclic\n variable. For a cyclic variable, returns the magnitude of the\n vector_sum() divided by the sum_value() (see\n _vector_selectivity for more details). For a non-cyclic\n variable, returns the max_value_bin()) as a proportion of the\n sum_value() (see _relative_selectivity for more details).\n '
if (d.cyclic == True):
return self._vector_selectivity(d)
else:
return self._relative_selectivity(d) | 2,468,232,232,889,291,300 | Return a measure of the peakedness of the distribution. The
calculation differs depending on whether this is a cyclic
variable. For a cyclic variable, returns the magnitude of the
vector_sum() divided by the sum_value() (see
_vector_selectivity for more details). For a non-cyclic
variable, returns the max_value_bin() as a proportion of the
sum_value() (see _relative_selectivity for more details). | featuremapper/distribution.py | selectivity | fcr/featuremapper | python | def selectivity(self, d):
'\n Return a measure of the peakedness of the distribution. The\n calculation differs depending on whether this is a cyclic\n variable. For a cyclic variable, returns the magnitude of the\n vector_sum() divided by the sum_value() (see\n _vector_selectivity for more details). For a non-cyclic\n variable, returns the max_value_bin()) as a proportion of the\n sum_value() (see _relative_selectivity for more details).\n '
if (d.cyclic == True):
return self._vector_selectivity(d)
else:
return self._relative_selectivity(d) |
def _relative_selectivity(self, d):
'\n Return max_value_bin()) as a proportion of the sum_value().\n\n This quantity is a measure of how strongly the distribution is\n biased towards the max_value_bin(). For a smooth,\n single-lobed distribution with an inclusive, non-cyclic range,\n this quantity is an analog to vector_selectivity. To be a\n precise analog for arbitrary distributions, it would need to\n compute some measure of the selectivity that works like the\n weighted_average() instead of the max_value_bin(). The result\n is scaled such that if all bins are identical, the selectivity\n is 0.0, and if all bins but one are zero, the selectivity is\n 1.0.\n '
if (len(d.data()) <= 1):
return 1.0
proportion = d._safe_divide(max(d.values()), sum(d.values()))
offset = (1.0 / len(d.values()))
scaled = ((proportion - offset) / (1.0 - offset))
if (scaled >= 0.0):
return scaled
else:
return 0.0 | -3,182,837,868,106,845,000 | Return max_value_bin()) as a proportion of the sum_value().
This quantity is a measure of how strongly the distribution is
biased towards the max_value_bin(). For a smooth,
single-lobed distribution with an inclusive, non-cyclic range,
this quantity is an analog to vector_selectivity. To be a
precise analog for arbitrary distributions, it would need to
compute some measure of the selectivity that works like the
weighted_average() instead of the max_value_bin(). The result
is scaled such that if all bins are identical, the selectivity
is 0.0, and if all bins but one are zero, the selectivity is
1.0. | featuremapper/distribution.py | _relative_selectivity | fcr/featuremapper | python | def _relative_selectivity(self, d):
'\n Return max_value_bin()) as a proportion of the sum_value().\n\n This quantity is a measure of how strongly the distribution is\n biased towards the max_value_bin(). For a smooth,\n single-lobed distribution with an inclusive, non-cyclic range,\n this quantity is an analog to vector_selectivity. To be a\n precise analog for arbitrary distributions, it would need to\n compute some measure of the selectivity that works like the\n weighted_average() instead of the max_value_bin(). The result\n is scaled such that if all bins are identical, the selectivity\n is 0.0, and if all bins but one are zero, the selectivity is\n 1.0.\n '
if (len(d.data()) <= 1):
return 1.0
proportion = d._safe_divide(max(d.values()), sum(d.values()))
offset = (1.0 / len(d.values()))
scaled = ((proportion - offset) / (1.0 - offset))
if (scaled >= 0.0):
return scaled
else:
return 0.0 |
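A minimal standalone sketch of the scaling described above, operating on a plain array of bin values (illustrative only; the real method works on the Distribution object):

import numpy as np

def relative_selectivity_sketch(values):
    values = np.asarray(values, dtype=float)
    if values.size <= 1:
        return 1.0
    total = values.sum()
    proportion = values.max() / total if total else 0.0
    offset = 1.0 / values.size            # proportion when all bins are identical
    scaled = (proportion - offset) / (1.0 - offset)
    return max(scaled, 0.0)

print(relative_selectivity_sketch([1, 1, 1, 1]))   # 0.0 for a flat distribution
print(relative_selectivity_sketch([0, 0, 5, 0]))   # 1.0 for a single non-zero bin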
def _vector_selectivity(self, d):
'\n Return the magnitude of the vector_sum() divided by the sum_value().\n\n This quantity is a vector-based measure of the peakedness of\n the distribution. If only a single feature_bin has a non-zero value(),\n the selectivity will be 1.0, and if all bins have the same\n value() then the selectivity will be 0.0. Other distributions\n will result in intermediate values.\n\n For a distribution with a sum_value() of zero (i.e. all bins\n empty), the selectivity is undefined. Assuming that one will\n usually be looking for high selectivity, we return zero in such\n a case so that high selectivity will not mistakenly be claimed.\n To find out whether such cases occurred, you can compare the\n value of undefined_values() before and after a series of\n calls to this function.\n '
return d._safe_divide(d.vector_sum()[0], sum(d.values())) | -1,075,925,606,402,771,200 | Return the magnitude of the vector_sum() divided by the sum_value().
This quantity is a vector-based measure of the peakedness of
the distribution. If only a single feature_bin has a non-zero value(),
the selectivity will be 1.0, and if all bins have the same
value() then the selectivity will be 0.0. Other distributions
will result in intermediate values.
For a distribution with a sum_value() of zero (i.e. all bins
empty), the selectivity is undefined. Assuming that one will
usually be looking for high selectivity, we return zero in such
a case so that high selectivity will not mistakenly be claimed.
To find out whether such cases occurred, you can compare the
value of undefined_values() before and after a series of
calls to this function. | featuremapper/distribution.py | _vector_selectivity | fcr/featuremapper | python | def _vector_selectivity(self, d):
'\n Return the magnitude of the vector_sum() divided by the sum_value().\n\n This quantity is a vector-based measure of the peakedness of\n the distribution. If only a single feature_bin has a non-zero value(),\n the selectivity will be 1.0, and if all bins have the same\n value() then the selectivity will be 0.0. Other distributions\n will result in intermediate values.\n\n For a distribution with a sum_value() of zero (i.e. all bins\n empty), the selectivity is undefined. Assuming that one will\n usually be looking for high selectivity, we return zero in such\n a case so that high selectivity will not mistakenly be claimed.\n To find out whether such cases occurred, you can compare the\n value of undefined_values() before and after a series of\n calls to this function.\n '
return d._safe_divide(d.vector_sum()[0], sum(d.values())) |
def second_max_value_bin(self, d):
'\n Return the feature_bin with the second largest value.\n If there is one feature_bin only, return it. This is not a correct result,\n however it is practical for plotting compatibility, and it will not\n mistakenly be claimed as secondary maximum, by forcing its selectivity\n to 0.0\n '
if (len(d.bins()) <= 1):
return d.bins()[0]
k = d.max_value_bin()
d.pop(k)
m = d.max_value_bin()
d.restore(k)
return m | 5,669,550,611,112,714,000 | Return the feature_bin with the second largest value.
If there is only one feature_bin, return it. This is not a correct result,
but it is practical for plotting compatibility, and it will not
mistakenly be claimed as a secondary maximum, because its selectivity is forced
to 0.0 | featuremapper/distribution.py | second_max_value_bin | fcr/featuremapper | python | def second_max_value_bin(self, d):
'\n Return the feature_bin with the second largest value.\n If there is one feature_bin only, return it. This is not a correct result,\n however it is practical for plotting compatibility, and it will not\n mistakenly be claimed as secondary maximum, by forcing its selectivity\n to 0.0\n '
if (len(d.bins()) <= 1):
return d.bins()[0]
k = d.max_value_bin()
d.pop(k)
m = d.max_value_bin()
d.restore(k)
return m |
def second_selectivity(self, d):
'\n Return the selectivity of the second largest value in the distribution.\n If there is one feature_bin only, the selectivity is 0, since there is no second\n peack at all, and this value is also used to discriminate the validity\n of second_max_value_bin()\n Selectivity is computed in two ways depending on whether the variable is\n a cyclic, as in selectivity()\n '
if (len(d._data) <= 1):
return 0.0
if (d.cyclic == True):
return self._vector_second_selectivity(d)
else:
return self._relative_second_selectivity(d) | -4,364,512,480,252,476,000 | Return the selectivity of the second largest value in the distribution.
If there is one feature_bin only, the selectivity is 0, since there is no second
peak at all, and this value is also used to judge the validity
of second_max_value_bin()
Selectivity is computed in two ways depending on whether the variable is
cyclic, as in selectivity() | featuremapper/distribution.py | second_selectivity | fcr/featuremapper | python | def second_selectivity(self, d):
'\n Return the selectivity of the second largest value in the distribution.\n If there is one feature_bin only, the selectivity is 0, since there is no second\n peack at all, and this value is also used to discriminate the validity\n of second_max_value_bin()\n Selectivity is computed in two ways depending on whether the variable is\n a cyclic, as in selectivity()\n '
if (len(d._data) <= 1):
return 0.0
if (d.cyclic == True):
return self._vector_second_selectivity(d)
else:
return self._relative_second_selectivity(d) |
def _relative_second_selectivity(self, d):
'\n Return the value of the second maximum as a proportion of the sum_value()\n see _relative_selectivity() for further details\n '
k = d.max_value_bin()
d.pop(k)
m = max(d.values())
d.restore(k)
proportion = d._safe_divide(m, sum(d.values()))
offset = (1.0 / len(d.data()))
scaled = ((proportion - offset) / (1.0 - offset))
return max(scaled, 0.0) | -8,135,196,164,067,553,000 | Return the value of the second maximum as a proportion of the sum_value()
see _relative_selectivity() for further details | featuremapper/distribution.py | _relative_second_selectivity | fcr/featuremapper | python | def _relative_second_selectivity(self, d):
'\n Return the value of the second maximum as a proportion of the sum_value()\n see _relative_selectivity() for further details\n '
k = d.max_value_bin()
d.pop(k)
m = max(d.values())
d.restore(k)
proportion = d._safe_divide(m, sum(d.values()))
offset = (1.0 / len(d.data()))
scaled = ((proportion - offset) / (1.0 - offset))
return max(scaled, 0.0) |
def _vector_second_selectivity(self, d):
'\n Return the magnitude of the vector_sum() of all bins excluding the\n maximum one, divided by the sum_value().\n see _vector_selectivity() for further details\n '
k = d.max_value_bin()
d.pop(k)
s = self.vector_sum(d)[0]
d.restore(k)
return self._safe_divide(s, sum(d.values())) | -8,547,644,693,540,583,000 | Return the magnitude of the vector_sum() of all bins excluding the
maximum one, divided by the sum_value().
see _vector_selectivity() for further details | featuremapper/distribution.py | _vector_second_selectivity | fcr/featuremapper | python | def _vector_second_selectivity(self, d):
'\n Return the magnitude of the vector_sum() of all bins excluding the\n maximum one, divided by the sum_value().\n see _vector_selectivity() for further details\n '
k = d.max_value_bin()
d.pop(k)
s = self.vector_sum(d)[0]
d.restore(k)
return self._safe_divide(s, sum(d.values())) |
def second_peak_bin(self, d):
"\n Return the feature_bin with the second peak in the distribution.\n Unlike second_max_value_bin(), it does not return a feature_bin which is the\n second largest value, if laying on a wing of the first peak, the second\n peak is returned only if the distribution is truly multimodal. If it isn't,\n return the first peak (for compatibility with numpy array type, and\n plotting compatibility), however the corresponding selectivity will be\n forced to 0.0\n "
h = d.data()
l = len(h)
if (l <= 1):
return d.keys()[0]
ks = list(h.keys())
ks.sort()
ik0 = ks.index(d.keys()[np.argmax(d.values())])
k0 = ks[ik0]
v0 = h[k0]
v = v0
k = k0
ik = ik0
while (h[k] <= v):
ik += 1
if (ik >= l):
ik = 0
if (ik == ik0):
return k0
v = h[k]
k = ks[ik]
ik1 = ik
v = v0
k = k0
ik = ik0
while (h[k] <= v):
ik -= 1
if (ik < 0):
ik = (l - 1)
if (ik == ik0):
return k0
v = h[k]
k = ks[ik]
ik2 = ik
if (ik1 == ik2):
return ks[ik1]
ik = ik1
m = 0
while (ik != ik2):
k = ks[ik]
if (h[k] > m):
m = h[k]
im = ik
ik += 1
if (ik >= l):
ik = 0
return ks[im] | 3,829,011,528,027,905,000 | Return the feature_bin with the second peak in the distribution.
Unlike second_max_value_bin(), it does not return a feature_bin holding the
second largest value when that bin lies on a wing of the first peak; the second
peak is returned only if the distribution is truly multimodal. If it isn't,
return the first peak (for compatibility with numpy array type and
plotting), however the corresponding selectivity will be
forced to 0.0 | featuremapper/distribution.py | second_peak_bin | fcr/featuremapper | python | def second_peak_bin(self, d):
"\n Return the feature_bin with the second peak in the distribution.\n Unlike second_max_value_bin(), it does not return a feature_bin which is the\n second largest value, if laying on a wing of the first peak, the second\n peak is returned only if the distribution is truly multimodal. If it isn't,\n return the first peak (for compatibility with numpy array type, and\n plotting compatibility), however the corresponding selectivity will be\n forced to 0.0\n "
h = d.data()
l = len(h)
if (l <= 1):
return d.keys()[0]
ks = list(h.keys())
ks.sort()
ik0 = ks.index(d.keys()[np.argmax(d.values())])
k0 = ks[ik0]
v0 = h[k0]
v = v0
k = k0
ik = ik0
while (h[k] <= v):
ik += 1
if (ik >= l):
ik = 0
if (ik == ik0):
return k0
v = h[k]
k = ks[ik]
ik1 = ik
v = v0
k = k0
ik = ik0
while (h[k] <= v):
ik -= 1
if (ik < 0):
ik = (l - 1)
if (ik == ik0):
return k0
v = h[k]
k = ks[ik]
ik2 = ik
if (ik1 == ik2):
return ks[ik1]
ik = ik1
m = 0
while (ik != ik2):
k = ks[ik]
if (h[k] > m):
m = h[k]
im = ik
ik += 1
if (ik >= l):
ik = 0
return ks[im] |
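As a rough illustration of the idea (not the repository's exact walk-from-the-maximum algorithm), a second peak of a circular histogram can be located by collecting local maxima and discarding the global one; the sample values are made up:

import numpy as np

def second_peak_sketch(values):
    v = np.asarray(values, dtype=float)
    n = v.size
    # A bin is a local peak if it is >= both of its circular neighbours.
    peaks = [i for i in range(n)
             if v[i] >= v[(i - 1) % n] and v[i] >= v[(i + 1) % n]]
    main = int(np.argmax(v))
    others = [i for i in peaks if i != main]
    if not others:
        return main          # unimodal: fall back to the main peak
    return max(others, key=lambda i: v[i])

print(second_peak_sketch([0.1, 0.9, 0.3, 0.2, 0.6, 0.2]))   # 4, the secondary mode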
def second_peak_selectivity(self, d):
'\n Return the selectivity of the second peak in the distribution.\n\n If the distribution has only one peak, return 0.0, and this value is\n also usefl to discriminate the validity of second_peak_bin()\n '
l = len(d.keys())
if (l <= 1):
return 0.0
p1 = d.max_value_bin()
p2 = self.second_peak_bin(d)
if (p1 == p2):
return 0.0
m = d.get_value(p2)
proportion = d._safe_divide(m, sum(d.values()))
offset = (1.0 / l)
scaled = ((proportion - offset) / (1.0 - offset))
return max(scaled, 0.0) | 248,254,795,784,561,150 | Return the selectivity of the second peak in the distribution.
If the distribution has only one peak, return 0.0, and this value is
also useful to judge the validity of second_peak_bin() | featuremapper/distribution.py | second_peak_selectivity | fcr/featuremapper | python | def second_peak_selectivity(self, d):
'\n Return the selectivity of the second peak in the distribution.\n\n If the distribution has only one peak, return 0.0, and this value is\n also usefl to discriminate the validity of second_peak_bin()\n '
l = len(d.keys())
if (l <= 1):
return 0.0
p1 = d.max_value_bin()
p2 = self.second_peak_bin(d)
if (p1 == p2):
return 0.0
m = d.get_value(p2)
proportion = d._safe_divide(m, sum(d.values()))
offset = (1.0 / l)
scaled = ((proportion - offset) / (1.0 - offset))
return max(scaled, 0.0) |
def second_peak(self, d):
'\n Return preference and selectivity of the second peak in the distribution.\n\n It is just the combination of second_peak_bin() and\n second_peak_selectivity(), with the advantage of avoiding a duplicate\n call of second_peak_bin(), if the user is interested in both preference\n and selectivity, as often is the case.\n '
l = len(d.keys())
if (l <= 1):
return (d.keys()[0], 0.0)
p1 = d.max_value_bin()
p2 = self.second_peak_bin(d)
if (p1 == p2):
return (p1, 0.0)
m = d.get_value(p2)
proportion = d._safe_divide(m, sum(d.values()))
offset = (1.0 / l)
scaled = ((proportion - offset) / (1.0 - offset))
return (p2, max(scaled, 0.0)) | 2,059,865,115,646,842,600 | Return preference and selectivity of the second peak in the distribution.
It is just the combination of second_peak_bin() and
second_peak_selectivity(), with the advantage of avoiding a duplicate
call of second_peak_bin(), if the user is interested in both preference
and selectivity, as often is the case. | featuremapper/distribution.py | second_peak | fcr/featuremapper | python | def second_peak(self, d):
'\n Return preference and selectivity of the second peak in the distribution.\n\n It is just the combination of second_peak_bin() and\n second_peak_selectivity(), with the advantage of avoiding a duplicate\n call of second_peak_bin(), if the user is interested in both preference\n and selectivity, as often is the case.\n '
l = len(d.keys())
if (l <= 1):
return (d.keys()[0], 0.0)
p1 = d.max_value_bin()
p2 = self.second_peak_bin(d)
if (p1 == p2):
return (p1, 0.0)
m = d.get_value(p2)
proportion = d._safe_divide(m, sum(d.values()))
offset = (1.0 / l)
scaled = ((proportion - offset) / (1.0 - offset))
return (p2, max(scaled, 0.0)) |
def _orth(self, t):
'\n Return the orthogonal orientation\n '
if (t < (0.5 * np.pi)):
return (t + (0.5 * np.pi))
return (t - (0.5 * np.pi)) | 3,040,861,717,531,347,500 | Return the orthogonal orientation | featuremapper/distribution.py | _orth | fcr/featuremapper | python | def _orth(self, t):
'\n \n '
if (t < (0.5 * np.pi)):
return (t + (0.5 * np.pi))
return (t - (0.5 * np.pi)) |
def _in_pi(self, t):
'\n Reduce orientation from -pi..2pi to 0..pi\n '
if (t > np.pi):
return (t - np.pi)
if (t < 0):
return (t + np.pi)
return t | 2,151,608,601,211,757,800 | Reduce orientation from -pi..2pi to 0..pi | featuremapper/distribution.py | _in_pi | fcr/featuremapper | python | def _in_pi(self, t):
'\n \n '
if (t > np.pi):
return (t - np.pi)
if (t < 0):
return (t + np.pi)
return t |
def von_mises(self, pars, x):
'\n Compute a simplified von Mises function.\n\n Original formulation in Richard von Mises, "Wahrscheinlichkeitsrechnung\n und ihre Anwendungen in der Statistik und theoretischen Physik", 1931,\n Deuticke, Leipzig; see also Mardia, K.V. and Jupp, P.E., " Directional\n Statistics", 1999, J. Wiley, p.36;\n http://en.wikipedia.org/wiki/Von_Mises_distribution\n The two differences are that this function is a continuous probability\n distribution on a semi-circle, while von Mises is on the full circle,\n and that the normalization factor, which is the inverse of the modified\n Bessel function of first kind and 0 degree in the original, is here a fit parameter.\n '
(a, k, t) = pars
return (a * np.exp((k * (np.cos((2 * (x - t))) - 1)))) | 803,809,184,716,551,800 | Compute a simplified von Mises function.
Original formulation in Richard von Mises, "Wahrscheinlichkeitsrechnung
und ihre Anwendungen in der Statistik und theoretischen Physik", 1931,
Deuticke, Leipzig; see also Mardia, K.V. and Jupp, P.E., " Directional
Statistics", 1999, J. Wiley, p.36;
http://en.wikipedia.org/wiki/Von_Mises_distribution
The two differences are that this function is a continuous probability
distribution on a semi-circle, while von Mises is on the full circle,
and that the normalization factor, which is the inverse of the modified
Bessel function of the first kind and order 0 in the original, is here a fit parameter. | featuremapper/distribution.py | von_mises | fcr/featuremapper | python | def von_mises(self, pars, x):
'\n Compute a simplified von Mises function.\n\n Original formulation in Richard von Mises, "Wahrscheinlichkeitsrechnung\n und ihre Anwendungen in der Statistik und theoretischen Physik", 1931,\n Deuticke, Leipzig; see also Mardia, K.V. and Jupp, P.E., " Directional\n Statistics", 1999, J. Wiley, p.36;\n http://en.wikipedia.org/wiki/Von_Mises_distribution\n The two differences are that this function is a continuous probability\n distribution on a semi-circle, while von Mises is on the full circle,\n and that the normalization factor, which is the inverse of the modified\n Bessel function of first kind and 0 degree in the original, is here a fit parameter.\n '
(a, k, t) = pars
return (a * np.exp((k * (np.cos((2 * (x - t))) - 1)))) |
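The simplified, pi-periodic profile described above is easy to evaluate directly; a small sketch with made-up parameters:

import numpy as np

def von_mises_sketch(a, k, t, x):
    # Amplitude a, concentration k, preferred value t; period pi rather than 2*pi.
    return a * np.exp(k * (np.cos(2 * (x - t)) - 1.0))

x = np.linspace(0, np.pi, 8, endpoint=False)
print(von_mises_sketch(1.0, 2.0, np.pi / 4, x))   # peaks at x = pi/4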
def von2_mises(self, pars, x):
'\n Compute a simplified bimodal von Mises function\n\n Two superposed von Mises functions, with different peak and bandwith values\n '
p1 = pars[:3]
p2 = pars[3:]
return (self.von_mises(p1, x) + self.von_mises(p2, x)) | -2,769,227,981,127,466,500 | Compute a simplified bimodal von Mises function
Two superposed von Mises functions, with different peak and bandwidth values | featuremapper/distribution.py | von2_mises | fcr/featuremapper | python | def von2_mises(self, pars, x):
'\n Compute a simplified bimodal von Mises function\n\n Two superposed von Mises functions, with different peak and bandwith values\n '
p1 = pars[:3]
p2 = pars[3:]
return (self.von_mises(p1, x) + self.von_mises(p2, x)) |
def fit_vm(self, distribution):
'\n computes the best fit of the monovariate von Mises function in the\n semi-circle.\n Return a tuple with the orientation preference, in the same range of\n axis_bounds, the orientation selectivity, and an estimate of the\n goodness-of-fit, as the variance of the predicted orientation\n preference. The selectivity is given by the bandwith parameter of the\n von Mises function, modified for compatibility with other selectivity\n computations in this class. The bandwith parameter is transposed in\n logaritmic scale, and is normalized by the maximum value for the number\n of bins in the distribution, in order to give roughly 1.0 for a\n distribution with one feature_bin at 1.0 an all the other at 0.0, and 0.0 for\n uniform distributions. The normalizing factor of the selectivity is fit\n for the total number of bins, using fit parameters computed offline.\n There are conditions that prevents apriori the possibility to fit the\n distribution:\n * not enough bins, at least 4 are necessary\n * the distribution is too flat, below the noise level\n and conditions of aposteriori failures:\n * "ier" flag returned by leastsq out of ( 1, 2, 3, 4 )\n * no estimated Jacobian around the solution\n * negative bandwith (the peak of the distribution is convex)\n Note that these are the minimal conditions, their fulfillment does not\n warrant unimodality, is up to the user to check the goodness-of-fit value\n for an accurate acceptance of the fit.\n '
if unavailable_scipy_optimize:
if (not VonMisesStatisticFn.user_warned_if_unavailable):
param.Parameterized().warning('scipy.optimize not available, dummy von Mises fit')
VonMisesStatisticFn.user_warned_if_unavailable = True
self.fit_exit_code = 3
return (0, 0, 0)
to_pi = (np.pi / distribution.axis_range)
x = (to_pi * np.array(distribution.bins()))
n = len(x)
if (n < 5):
param.Parameterized().warning('No von Mises fit possible with less than 4 bins')
self.fit_exit_code = (- 1)
return (0, 0, 0)
y = np.array(distribution.values())
if (y.std() < self.noise_level):
self.fit_exit_code = 1
return (0, 0, 0)
rn = (self.noise_level * np.random.random_sample(y.shape))
p0 = (1.0, 1.0, distribution.max_value_bin())
r = optimize.leastsq(self.von_mises_res, p0, args=(x, (y + rn)), full_output=True)
if (not (r[(- 1)] in (1, 2, 3, 4))):
self.fit_exit_code = (100 + r[(- 1)])
return (0, 0, 0)
residuals = r[2]['fvec']
jacobian = r[1]
bandwith = r[0][1]
tuning = r[0][2]
if (bandwith < 0):
self.fit_exit_code = 1
return (0, 0, 0)
if (jacobian is None):
self.fit_exit_code = 2
return (0, 0, 0)
error = ((residuals ** 2).sum() / (n - len(p0)))
covariance = (jacobian * error)
g = covariance[(2, 2)]
p = (self._in_pi(tuning) / to_pi)
s = self.norm_sel(bandwith, n)
self.fit_exit_code = 0
return (p, s, g) | -6,745,883,549,451,692,000 | computes the best fit of the monovariate von Mises function in the
semi-circle.
Return a tuple with the orientation preference, in the same range of
axis_bounds, the orientation selectivity, and an estimate of the
goodness-of-fit, as the variance of the predicted orientation
preference. The selectivity is given by the bandwidth parameter of the
von Mises function, modified for compatibility with other selectivity
computations in this class. The bandwidth parameter is transposed to a
logarithmic scale, and is normalized by the maximum value for the number
of bins in the distribution, in order to give roughly 1.0 for a
distribution with one feature_bin at 1.0 and all the others at 0.0, and 0.0 for
uniform distributions. The normalizing factor of the selectivity is fit
for the total number of bins, using fit parameters computed offline.
There are conditions that prevent, a priori, the possibility of fitting the
distribution:
* not enough bins, at least 4 are necessary
* the distribution is too flat, below the noise level
and conditions of a posteriori failures:
* "ier" flag returned by leastsq out of ( 1, 2, 3, 4 )
* no estimated Jacobian around the solution
* negative bandwidth (the peak of the distribution is convex)
Note that these are the minimal conditions; their fulfillment does not
guarantee unimodality, and it is up to the user to check the goodness-of-fit value
for an accurate acceptance of the fit. | featuremapper/distribution.py | fit_vm | fcr/featuremapper | python | def fit_vm(self, distribution):
'\n computes the best fit of the monovariate von Mises function in the\n semi-circle.\n Return a tuple with the orientation preference, in the same range of\n axis_bounds, the orientation selectivity, and an estimate of the\n goodness-of-fit, as the variance of the predicted orientation\n preference. The selectivity is given by the bandwith parameter of the\n von Mises function, modified for compatibility with other selectivity\n computations in this class. The bandwith parameter is transposed in\n logaritmic scale, and is normalized by the maximum value for the number\n of bins in the distribution, in order to give roughly 1.0 for a\n distribution with one feature_bin at 1.0 an all the other at 0.0, and 0.0 for\n uniform distributions. The normalizing factor of the selectivity is fit\n for the total number of bins, using fit parameters computed offline.\n There are conditions that prevents apriori the possibility to fit the\n distribution:\n * not enough bins, at least 4 are necessary\n * the distribution is too flat, below the noise level\n and conditions of aposteriori failures:\n * "ier" flag returned by leastsq out of ( 1, 2, 3, 4 )\n * no estimated Jacobian around the solution\n * negative bandwith (the peak of the distribution is convex)\n Note that these are the minimal conditions, their fulfillment does not\n warrant unimodality, is up to the user to check the goodness-of-fit value\n for an accurate acceptance of the fit.\n '
if unavailable_scipy_optimize:
if (not VonMisesStatisticFn.user_warned_if_unavailable):
param.Parameterized().warning('scipy.optimize not available, dummy von Mises fit')
VonMisesStatisticFn.user_warned_if_unavailable = True
self.fit_exit_code = 3
return (0, 0, 0)
to_pi = (np.pi / distribution.axis_range)
x = (to_pi * np.array(distribution.bins()))
n = len(x)
if (n < 5):
param.Parameterized().warning('No von Mises fit possible with less than 4 bins')
self.fit_exit_code = (- 1)
return (0, 0, 0)
y = np.array(distribution.values())
if (y.std() < self.noise_level):
self.fit_exit_code = 1
return (0, 0, 0)
rn = (self.noise_level * np.random.random_sample(y.shape))
p0 = (1.0, 1.0, distribution.max_value_bin())
r = optimize.leastsq(self.von_mises_res, p0, args=(x, (y + rn)), full_output=True)
if (not (r[(- 1)] in (1, 2, 3, 4))):
self.fit_exit_code = (100 + r[(- 1)])
return (0, 0, 0)
residuals = r[2]['fvec']
jacobian = r[1]
bandwith = r[0][1]
tuning = r[0][2]
if (bandwith < 0):
self.fit_exit_code = 1
return (0, 0, 0)
if (jacobian is None):
self.fit_exit_code = 2
return (0, 0, 0)
error = ((residuals ** 2).sum() / (n - len(p0)))
covariance = (jacobian * error)
g = covariance[(2, 2)]
p = (self._in_pi(tuning) / to_pi)
s = self.norm_sel(bandwith, n)
self.fit_exit_code = 0
return (p, s, g) |
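A self-contained sketch of the kind of least-squares fit used above, on a synthetic tuning curve (the initial guess, noise level, and data are made up; the real method also converts the fitted bandwidth into a normalized selectivity):

import numpy as np
from scipy import optimize

def vm(p, x):
    a, k, t = p
    return a * np.exp(k * (np.cos(2 * (x - t)) - 1.0))

def residuals(p, x, y):
    return vm(p, x) - y

# Synthetic tuning curve with a peak near 1.0 rad plus a little noise.
x = np.linspace(0, np.pi, 16, endpoint=False)
rng = np.random.default_rng(0)
y = vm((1.0, 3.0, 1.0), x) + 0.02 * rng.standard_normal(x.size)

p0 = (1.0, 1.0, x[np.argmax(y)])                       # crude initial guess
p_fit, cov, info, msg, ier = optimize.leastsq(residuals, p0, args=(x, y),
                                              full_output=True)
print(p_fit, ier)   # ier in (1, 2, 3, 4) indicates a successful fit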
def fit_v2m(self, distribution):
'\n computes the best fit of the bivariate von Mises function in the\n semi-circle.\n Return the tuple:\n (\n orientation1_preference, orientation1_selectivity, goodness_of_fit1,\n orientation2_preference, orientation2_selectivity, goodness_of_fit2\n )\n See fit_vm() for considerations about selectivity and goodness_of_fit\n '
null = (0, 0, 0, 0, 0, 0)
if unavailable_scipy_optimize:
if (not VonMisesStatisticFn.user_warned_if_unavailable):
param.Parameterized().warning('scipy.optimize not available, dummy von Mises fit')
VonMisesStatisticFn.user_warned_if_unavailable = True
self.fit_exit_code = 3
return null
to_pi = (np.pi / distribution.axis_range)
x = (to_pi * np.array(distribution.bins()))
n = len(x)
if (n < 9):
param.Parameterized().warning('no bimodal von Mises fit possible with less than 8 bins')
self.fit_exit_code = (- 1)
return null
y = np.array(distribution.values())
if (y.std() < self.noise_level):
self.fit_exit_code = 1
return null
rn = (self.noise_level * np.random.random_sample(y.shape))
t0 = distribution.max_value_bin()
p0 = (1.0, 1.0, t0, 1.0, 1.0, self._orth(t0))
r = optimize.leastsq(self.von2_mises_res, p0, args=(x, (y + rn)), full_output=True)
if (not (r[(- 1)] in (1, 2, 3, 4))):
self.fit_exit_code = (100 + r[(- 1)])
return null
residuals = r[2]['fvec']
jacobian = r[1]
bandwith_1 = r[0][1]
tuning_1 = r[0][2]
bandwith_2 = r[0][4]
tuning_2 = r[0][5]
if (jacobian is None):
self.fit_exit_code = 2
return null
if (bandwith_1 < 0):
self.fit_exit_code = 1
return null
if (bandwith_2 < 0):
self.fit_exit_code = 1
return null
error = ((residuals ** 2).sum() / (n - len(p0)))
covariance = (jacobian * error)
g1 = covariance[(2, 2)]
g2 = covariance[(5, 5)]
p1 = (self._in_pi(tuning_1) / to_pi)
p2 = (self._in_pi(tuning_2) / to_pi)
s1 = self.norm_sel(bandwith_1, n)
s2 = self.norm_sel(bandwith_2, n)
self.fit_exit_code = 0
return (p1, s1, g1, p2, s2, g2) | 2,784,568,525,277,119,500 | computes the best fit of the bivariate von Mises function in the
semi-circle.
Return the tuple:
(
orientation1_preference, orientation1_selectivity, goodness_of_fit1,
orientation2_preference, orientation2_selectivity, goodness_of_fit2
)
See fit_vm() for considerations about selectivity and goodness_of_fit | featuremapper/distribution.py | fit_v2m | fcr/featuremapper | python | def fit_v2m(self, distribution):
'\n computes the best fit of the bivariate von Mises function in the\n semi-circle.\n Return the tuple:\n (\n orientation1_preference, orientation1_selectivity, goodness_of_fit1,\n orientation2_preference, orientation2_selectivity, goodness_of_fit2\n )\n See fit_vm() for considerations about selectivity and goodness_of_fit\n '
null = (0, 0, 0, 0, 0, 0)
if unavailable_scipy_optimize:
if (not VonMisesStatisticFn.user_warned_if_unavailable):
param.Parameterized().warning('scipy.optimize not available, dummy von Mises fit')
VonMisesStatisticFn.user_warned_if_unavailable = True
self.fit_exit_code = 3
return null
to_pi = (np.pi / distribution.axis_range)
x = (to_pi * np.array(distribution.bins()))
n = len(x)
if (n < 9):
param.Parameterized().warning('no bimodal von Mises fit possible with less than 8 bins')
self.fit_exit_code = (- 1)
return null
y = np.array(distribution.values())
if (y.std() < self.noise_level):
self.fit_exit_code = 1
return null
rn = (self.noise_level * np.random.random_sample(y.shape))
t0 = distribution.max_value_bin()
p0 = (1.0, 1.0, t0, 1.0, 1.0, self._orth(t0))
r = optimize.leastsq(self.von2_mises_res, p0, args=(x, (y + rn)), full_output=True)
if (not (r[(- 1)] in (1, 2, 3, 4))):
self.fit_exit_code = (100 + r[(- 1)])
return null
residuals = r[2]['fvec']
jacobian = r[1]
bandwith_1 = r[0][1]
tuning_1 = r[0][2]
bandwith_2 = r[0][4]
tuning_2 = r[0][5]
if (jacobian is None):
self.fit_exit_code = 2
return null
if (bandwith_1 < 0):
self.fit_exit_code = 1
return null
if (bandwith_2 < 0):
self.fit_exit_code = 1
return null
error = ((residuals ** 2).sum() / (n - len(p0)))
covariance = (jacobian * error)
g1 = covariance[(2, 2)]
g2 = covariance[(5, 5)]
p1 = (self._in_pi(tuning_1) / to_pi)
p2 = (self._in_pi(tuning_2) / to_pi)
s1 = self.norm_sel(bandwith_1, n)
s2 = self.norm_sel(bandwith_2, n)
self.fit_exit_code = 0
return (p1, s1, g1, p2, s2, g2) |
def __call__(self, distribution):
'\n Apply the distribution statistic function; must be implemented by subclasses.\n\n '
raise NotImplementedError | 195,079,524,237,858,200 | Apply the distribution statistic function; must be implemented by subclasses. | featuremapper/distribution.py | __call__ | fcr/featuremapper | python | def __call__(self, distribution):
'\n \n\n '
raise NotImplementedError |
def _analyze_distr(self, d):
'\n Analyze the given distribution with von Mises bimodal fit.\n\n The distribution is analyzed with both unimodal and bimodal fits, and a\n decision about the number of modes is made by comparing the goodness of\n fit. It is a quick but inaccurate way of estimating the number of modes.\n Return preference, selectivity, goodness of fit for both modes, and the\n estimated numer of modes, None if even the unimodal fit failed. If the\n distribution is unimodal, values of the second mode are set to 0. The main\n mode is always the one with the largest selectivity (von Mises bandwith).\n '
no1 = False
f = self.fit_vm(d)
if (self.fit_exit_code != 0):
no1 = True
(p, s, g) = f
f2 = self.fit_v2m(d)
if ((self.fit_exit_code != 0) or (f2[2] > self.worst_fit)):
if (no1 or (f[(- 1)] > self.worst_fit)):
return None
return (p, s, g, 0, 0, 0, 1)
(p1, s1, g1, p2, s2, g2) = f2
if (g1 > g):
return (p, s, g, 0, 0, 0, 1)
if (s2 > s1):
return (p2, s2, g2, p1, s1, g1, 2)
return (p1, s1, g1, p2, s2, g2, 2) | -6,726,496,357,450,420,000 | Analyze the given distribution with von Mises bimodal fit.
The distribution is analyzed with both unimodal and bimodal fits, and a
decision about the number of modes is made by comparing the goodness of
fit. It is a quick but inaccurate way of estimating the number of modes.
Return preference, selectivity, goodness of fit for both modes, and the
estimated number of modes, or None if even the unimodal fit failed. If the
distribution is unimodal, values of the second mode are set to 0. The main
mode is always the one with the largest selectivity (von Mises bandwidth). | featuremapper/distribution.py | _analyze_distr | fcr/featuremapper | python | def _analyze_distr(self, d):
'\n Analyze the given distribution with von Mises bimodal fit.\n\n The distribution is analyzed with both unimodal and bimodal fits, and a\n decision about the number of modes is made by comparing the goodness of\n fit. It is a quick but inaccurate way of estimating the number of modes.\n Return preference, selectivity, goodness of fit for both modes, and the\n estimated numer of modes, None if even the unimodal fit failed. If the\n distribution is unimodal, values of the second mode are set to 0. The main\n mode is always the one with the largest selectivity (von Mises bandwith).\n '
no1 = False
f = self.fit_vm(d)
if (self.fit_exit_code != 0):
no1 = True
(p, s, g) = f
f2 = self.fit_v2m(d)
if ((self.fit_exit_code != 0) or (f2[2] > self.worst_fit)):
if (no1 or (f[(- 1)] > self.worst_fit)):
return None
return (p, s, g, 0, 0, 0, 1)
(p1, s1, g1, p2, s2, g2) = f2
if (g1 > g):
return (p, s, g, 0, 0, 0, 1)
if (s2 > s1):
return (p2, s2, g2, p1, s1, g1, 2)
return (p1, s1, g1, p2, s2, g2, 2) |
def process_lengths(input):
'\n Computing the lengths of sentences in current batchs\n '
max_length = input.size(1)
lengths = list((max_length - input.data.eq(0).sum(1).squeeze()))
return lengths | -2,427,997,533,236,584,400 | Computing the lengths of sentences in current batchs | AiR-M/ban/base_model.py | process_lengths | szzexpoi/AiR | python | def process_lengths(input):
'\n \n '
max_length = input.size(1)
lengths = list((max_length - input.data.eq(0).sum(1).squeeze()))
return lengths |
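The length computation above assumes index 0 is the padding token; a tiny sketch with a made-up padded batch:

import torch

batch = torch.tensor([[3, 7, 9, 0, 0],
                      [4, 2, 0, 0, 0]])       # 0 marks padding
lengths = batch.size(1) - batch.eq(0).sum(1)  # valid tokens per sentence
print(lengths.tolist())                       # [3, 2]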
def select_last(x, lengths):
'\n Adaptively select the hidden state at the end of sentences\n '
batch_size = x.size(0)
seq_length = x.size(1)
mask = x.data.new().resize_as_(x.data).fill_(0)
for i in range(batch_size):
mask[i][(lengths[i] - 1)].fill_(1)
mask = Variable(mask)
x = x.mul(mask)
x = x.sum(1).view(batch_size, x.size(2), x.size(3))
return x | -8,111,296,031,895,657,000 | Adaptively select the hidden state at the end of sentences | AiR-M/ban/base_model.py | select_last | szzexpoi/AiR | python | def select_last(x, lengths):
'\n \n '
batch_size = x.size(0)
seq_length = x.size(1)
mask = x.data.new().resize_as_(x.data).fill_(0)
for i in range(batch_size):
mask[i][(lengths[i] - 1)].fill_(1)
mask = Variable(mask)
x = x.mul(mask)
x = x.sum(1).view(batch_size, x.size(2), x.size(3))
return x |
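For the common three-dimensional case ([batch, seq, dim]), the same selection of the hidden state at position length-1 can be written with gather; this is an illustrative sketch, not the four-dimensional masked-sum version above:

import torch

def select_last_sketch(x, lengths):
    idx = torch.tensor(lengths, device=x.device) - 1       # last valid step per example
    idx = idx.view(-1, 1, 1).expand(-1, 1, x.size(2))      # shape [batch, 1, dim]
    return x.gather(1, idx).squeeze(1)                     # shape [batch, dim]

x = torch.arange(2 * 4 * 3, dtype=torch.float).view(2, 4, 3)
print(select_last_sketch(x, [2, 4]))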
def forward(self, v, b, q):
'Forward\n\n v: [batch, num_objs, obj_dim]\n b: [batch, num_objs, b_dim]\n q: [batch_size, seq_length]\n\n return: logits, not probs\n '
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb)
boxes = b[:, :, :4].transpose(1, 2)
b_emb = ([0] * self.glimpse)
(att, logits) = self.v_att.forward_all(v, q_emb)
for g in range(self.glimpse):
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att[:, g, :, :])
(atten, _) = logits[:, g, :, :].max(2)
embed = self.counter(boxes, atten)
q_emb = (self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb)
q_emb = (q_emb + self.c_prj[g](embed).unsqueeze(1))
logits = self.classifier(q_emb.sum(1))
return (F.softmax(logits, dim=(- 1)), att) | 8,726,120,948,886,197,000 | Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs | AiR-M/ban/base_model.py | forward | szzexpoi/AiR | python | def forward(self, v, b, q):
'Forward\n\n v: [batch, num_objs, obj_dim]\n b: [batch, num_objs, b_dim]\n q: [batch_size, seq_length]\n\n return: logits, not probs\n '
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb)
boxes = b[:, :, :4].transpose(1, 2)
b_emb = ([0] * self.glimpse)
(att, logits) = self.v_att.forward_all(v, q_emb)
for g in range(self.glimpse):
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att[:, g, :, :])
(atten, _) = logits[:, g, :, :].max(2)
embed = self.counter(boxes, atten)
q_emb = (self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb)
q_emb = (q_emb + self.c_prj[g](embed).unsqueeze(1))
logits = self.classifier(q_emb.sum(1))
return (F.softmax(logits, dim=(- 1)), att) |
def forward(self, v, b, q):
'Forward\n\n v: [batch, num_objs, obj_dim]\n b: [batch, num_objs, b_dim]\n q: [batch_size, seq_length]\n\n return: logits, not probs\n '
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb)
ori_q_emb = q_emb
boxes = b[:, :, :4].transpose(1, 2)
b_emb = ([0] * self.glimpse)
s_x = self.init_hidden_state(len(q), 256)
s_h = torch.tanh(self.semantic_q(ori_q_emb.mean(1)))
v_att = torch.tanh(self.att_v(F.dropout(v, 0.25)))
op = []
att_mask = []
q_emb_pool = []
for g in range(self.glimpse):
s_h = self.semantic_rnn(s_x, s_h)
s_x = F.softmax(self.semantic_pred(s_h), dim=(- 1))
op.append(s_x)
s_x = torch.max(s_x, dim=(- 1))[1]
s_x = self.semantic_embed(s_x)
s_att = torch.tanh(self.att_s(s_h)).unsqueeze(1).expand_as(v_att)
fuse_feat = torch.tanh(self.att_p(torch.mul(s_att, v_att)))
reason_att = self.att(fuse_feat)
reason_att = F.softmax(reason_att.view(reason_att.size(0), (- 1)), dim=(- 1))
cur_v = torch.mul(v, reason_att.unsqueeze((- 1)).expand_as(v))
(att, logits) = self.v_att(cur_v, ori_q_emb)
(att, logits) = (att.squeeze(), logits.squeeze())
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att)
(atten, _) = logits.max(2)
embed = self.counter(boxes, atten)
q_emb = (self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb)
q_emb = (q_emb + self.c_prj[g](embed).unsqueeze(1))
q_emb_pool.append(q_emb)
att_mask.append(reason_att)
op = torch.cat([_.unsqueeze(1) for _ in op], dim=1)
att_mask = torch.cat([_.unsqueeze(1) for _ in att_mask], dim=1)
valid_op = process_lengths(torch.max(op, dim=(- 1))[1])
q_emb_pool = torch.cat([_.unsqueeze(1) for _ in q_emb_pool], dim=1)
q_emb = select_last(q_emb_pool, valid_op)
logits = self.classifier(q_emb.sum(1))
return (F.softmax(logits, dim=(- 1)), op, att_mask) | -7,458,918,011,432,265,000 | Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs | AiR-M/ban/base_model.py | forward | szzexpoi/AiR | python | def forward(self, v, b, q):
'Forward\n\n v: [batch, num_objs, obj_dim]\n b: [batch, num_objs, b_dim]\n q: [batch_size, seq_length]\n\n return: logits, not probs\n '
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb)
ori_q_emb = q_emb
boxes = b[:, :, :4].transpose(1, 2)
b_emb = ([0] * self.glimpse)
s_x = self.init_hidden_state(len(q), 256)
s_h = torch.tanh(self.semantic_q(ori_q_emb.mean(1)))
v_att = torch.tanh(self.att_v(F.dropout(v, 0.25)))
op = []
att_mask = []
q_emb_pool = []
for g in range(self.glimpse):
s_h = self.semantic_rnn(s_x, s_h)
s_x = F.softmax(self.semantic_pred(s_h), dim=(- 1))
op.append(s_x)
s_x = torch.max(s_x, dim=(- 1))[1]
s_x = self.semantic_embed(s_x)
s_att = torch.tanh(self.att_s(s_h)).unsqueeze(1).expand_as(v_att)
fuse_feat = torch.tanh(self.att_p(torch.mul(s_att, v_att)))
reason_att = self.att(fuse_feat)
reason_att = F.softmax(reason_att.view(reason_att.size(0), (- 1)), dim=(- 1))
cur_v = torch.mul(v, reason_att.unsqueeze((- 1)).expand_as(v))
(att, logits) = self.v_att(cur_v, ori_q_emb)
(att, logits) = (att.squeeze(), logits.squeeze())
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att)
(atten, _) = logits.max(2)
embed = self.counter(boxes, atten)
q_emb = (self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb)
q_emb = (q_emb + self.c_prj[g](embed).unsqueeze(1))
q_emb_pool.append(q_emb)
att_mask.append(reason_att)
op = torch.cat([_.unsqueeze(1) for _ in op], dim=1)
att_mask = torch.cat([_.unsqueeze(1) for _ in att_mask], dim=1)
valid_op = process_lengths(torch.max(op, dim=(- 1))[1])
q_emb_pool = torch.cat([_.unsqueeze(1) for _ in q_emb_pool], dim=1)
q_emb = select_last(q_emb_pool, valid_op)
logits = self.classifier(q_emb.sum(1))
return (F.softmax(logits, dim=(- 1)), op, att_mask) |
def update_twitter_banner(api: tweepy.API) -> None:
'Update the twitter banner of the current profile using the image specified in config.'
api.update_profile_banner(Config.IMAGE_PATH) | 4,279,449,391,939,052,000 | Update the twitter banner of the current profile using the image specified in config. | app/twitter.py | update_twitter_banner | janaSunrise/Spotify-Twitter-Banner | python | def update_twitter_banner(api: tweepy.API) -> None:
api.update_profile_banner(Config.IMAGE_PATH) |
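A possible usage sketch for the record above; the credentials and file name are placeholders, and the auth handler name is the tweepy v4 spelling (older releases call it OAuthHandler):

import tweepy

auth = tweepy.OAuth1UserHandler("API_KEY", "API_SECRET",
                                "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
api = tweepy.API(auth)
api.update_profile_banner("banner.png")   # path to the generated banner image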
def __init__(__self__, *, account_name: pulumi.Input[str], resource_group_name: pulumi.Input[str], group_id: Optional[pulumi.Input[str]]=None, private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]=None, private_endpoint_connection_name: Optional[pulumi.Input[str]]=None, private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]=None, provisioning_state: Optional[pulumi.Input[str]]=None):
"\n The set of arguments for constructing a PrivateEndpointConnection resource.\n :param pulumi.Input[str] account_name: Cosmos DB database account name.\n :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n :param pulumi.Input[str] group_id: Group id of the private endpoint.\n :param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.\n :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.\n :param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.\n :param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.\n "
pulumi.set(__self__, 'account_name', account_name)
pulumi.set(__self__, 'resource_group_name', resource_group_name)
if (group_id is not None):
pulumi.set(__self__, 'group_id', group_id)
if (private_endpoint is not None):
pulumi.set(__self__, 'private_endpoint', private_endpoint)
if (private_endpoint_connection_name is not None):
pulumi.set(__self__, 'private_endpoint_connection_name', private_endpoint_connection_name)
if (private_link_service_connection_state is not None):
pulumi.set(__self__, 'private_link_service_connection_state', private_link_service_connection_state)
if (provisioning_state is not None):
pulumi.set(__self__, 'provisioning_state', provisioning_state) | -4,943,972,685,862,324,000 | The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | __init__ | polivbr/pulumi-azure-native | python | def __init__(__self__, *, account_name: pulumi.Input[str], resource_group_name: pulumi.Input[str], group_id: Optional[pulumi.Input[str]]=None, private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]=None, private_endpoint_connection_name: Optional[pulumi.Input[str]]=None, private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]=None, provisioning_state: Optional[pulumi.Input[str]]=None):
"\n The set of arguments for constructing a PrivateEndpointConnection resource.\n :param pulumi.Input[str] account_name: Cosmos DB database account name.\n :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n :param pulumi.Input[str] group_id: Group id of the private endpoint.\n :param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.\n :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.\n :param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.\n :param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.\n "
pulumi.set(__self__, 'account_name', account_name)
pulumi.set(__self__, 'resource_group_name', resource_group_name)
if (group_id is not None):
pulumi.set(__self__, 'group_id', group_id)
if (private_endpoint is not None):
pulumi.set(__self__, 'private_endpoint', private_endpoint)
if (private_endpoint_connection_name is not None):
pulumi.set(__self__, 'private_endpoint_connection_name', private_endpoint_connection_name)
if (private_link_service_connection_state is not None):
pulumi.set(__self__, 'private_link_service_connection_state', private_link_service_connection_state)
if (provisioning_state is not None):
pulumi.set(__self__, 'provisioning_state', provisioning_state) |
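A possible Pulumi program using this resource; the account, group, and connection names are placeholders, and the argument classes are assumed to be exported from the same versioned module shown in the record:

import pulumi
from pulumi_azure_native.documentdb import v20210615 as documentdb

# Approve a private endpoint connection on an existing Cosmos DB account (names are hypothetical).
connection = documentdb.PrivateEndpointConnection(
    "cosmos-pe-connection",
    account_name="my-cosmos-account",
    resource_group_name="my-resource-group",
    private_endpoint_connection_name="my-connection",
    private_link_service_connection_state=documentdb.PrivateLinkServiceConnectionStatePropertyArgs(
        status="Approved",
        description="Approved via Pulumi",
    ),
)

pulumi.export("connection_id", connection.id)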
@property
@pulumi.getter(name='accountName')
def account_name(self) -> pulumi.Input[str]:
'\n Cosmos DB database account name.\n '
return pulumi.get(self, 'account_name') | 502,045,046,113,578,100 | Cosmos DB database account name. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | account_name | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='accountName')
def account_name(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'account_name') |
@property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
'\n The name of the resource group. The name is case insensitive.\n '
return pulumi.get(self, 'resource_group_name') | 9,099,428,823,929,783,000 | The name of the resource group. The name is case insensitive. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | resource_group_name | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'resource_group_name') |
@property
@pulumi.getter(name='groupId')
def group_id(self) -> Optional[pulumi.Input[str]]:
'\n Group id of the private endpoint.\n '
return pulumi.get(self, 'group_id') | 6,219,630,646,862,960,000 | Group id of the private endpoint. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | group_id | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='groupId')
def group_id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'group_id') |
@property
@pulumi.getter(name='privateEndpoint')
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
'\n Private endpoint which the connection belongs to.\n '
return pulumi.get(self, 'private_endpoint') | 5,022,729,442,492,625,000 | Private endpoint which the connection belongs to. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | private_endpoint | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='privateEndpoint')
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
'\n \n '
return pulumi.get(self, 'private_endpoint') |
@property
@pulumi.getter(name='privateEndpointConnectionName')
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
'\n The name of the private endpoint connection.\n '
return pulumi.get(self, 'private_endpoint_connection_name') | 9,106,011,365,118,251,000 | The name of the private endpoint connection. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | private_endpoint_connection_name | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='privateEndpointConnectionName')
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'private_endpoint_connection_name') |
@property
@pulumi.getter(name='privateLinkServiceConnectionState')
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
'\n Connection State of the Private Endpoint Connection.\n '
return pulumi.get(self, 'private_link_service_connection_state') | -6,430,009,499,459,862,000 | Connection State of the Private Endpoint Connection. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | private_link_service_connection_state | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='privateLinkServiceConnectionState')
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
'\n \n '
return pulumi.get(self, 'private_link_service_connection_state') |
@property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
'\n Provisioning state of the private endpoint.\n '
return pulumi.get(self, 'provisioning_state') | -7,459,372,872,054,955,000 | Provisioning state of the private endpoint. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | provisioning_state | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'provisioning_state') |
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, account_name: Optional[pulumi.Input[str]]=None, group_id: Optional[pulumi.Input[str]]=None, private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]]=None, private_endpoint_connection_name: Optional[pulumi.Input[str]]=None, private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]]=None, provisioning_state: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, __props__=None):
"\n A private endpoint connection\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] account_name: Cosmos DB database account name.\n :param pulumi.Input[str] group_id: Group id of the private endpoint.\n :param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.\n :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.\n :param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.\n :param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.\n :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n "
... | 2,332,089,667,209,762,000 | A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | __init__ | polivbr/pulumi-azure-native | python | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, account_name: Optional[pulumi.Input[str]]=None, group_id: Optional[pulumi.Input[str]]=None, private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]]=None, private_endpoint_connection_name: Optional[pulumi.Input[str]]=None, private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]]=None, provisioning_state: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, __props__=None):
"\n A private endpoint connection\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] account_name: Cosmos DB database account name.\n :param pulumi.Input[str] group_id: Group id of the private endpoint.\n :param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.\n :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.\n :param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.\n :param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.\n :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n "
... |
@overload
def __init__(__self__, resource_name: str, args: PrivateEndpointConnectionArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n A private endpoint connection\n\n :param str resource_name: The name of the resource.\n :param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
... | 1,086,483,198,058,566,000 | A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | __init__ | polivbr/pulumi-azure-native | python | @overload
def __init__(__self__, resource_name: str, args: PrivateEndpointConnectionArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n A private endpoint connection\n\n :param str resource_name: The name of the resource.\n :param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
... |
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'PrivateEndpointConnection':
"\n Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__['group_id'] = None
__props__.__dict__['name'] = None
__props__.__dict__['private_endpoint'] = None
__props__.__dict__['private_link_service_connection_state'] = None
__props__.__dict__['provisioning_state'] = None
__props__.__dict__['type'] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__) | 1,281,849,848,306,374,700 | Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | get | polivbr/pulumi-azure-native | python | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'PrivateEndpointConnection':
"\n Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__['group_id'] = None
__props__.__dict__['name'] = None
__props__.__dict__['private_endpoint'] = None
__props__.__dict__['private_link_service_connection_state'] = None
__props__.__dict__['provisioning_state'] = None
__props__.__dict__['type'] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__) |
@property
@pulumi.getter(name='groupId')
def group_id(self) -> pulumi.Output[Optional[str]]:
'\n Group id of the private endpoint.\n '
return pulumi.get(self, 'group_id') | 1,222,296,214,502,759,200 | Group id of the private endpoint. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | group_id | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='groupId')
def group_id(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'group_id') |
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n The name of the resource\n '
return pulumi.get(self, 'name') | 2,231,345,607,626,165,800 | The name of the resource | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | name | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'name') |
@property
@pulumi.getter(name='privateEndpoint')
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
'\n Private endpoint which the connection belongs to.\n '
return pulumi.get(self, 'private_endpoint') | 5,557,291,932,331,128,000 | Private endpoint which the connection belongs to. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | private_endpoint | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='privateEndpoint')
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
'\n \n '
return pulumi.get(self, 'private_endpoint') |
@property
@pulumi.getter(name='privateLinkServiceConnectionState')
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
'\n Connection State of the Private Endpoint Connection.\n '
return pulumi.get(self, 'private_link_service_connection_state') | 4,314,061,072,508,133,000 | Connection State of the Private Endpoint Connection. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | private_link_service_connection_state | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='privateLinkServiceConnectionState')
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
'\n \n '
return pulumi.get(self, 'private_link_service_connection_state') |
@property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
'\n Provisioning state of the private endpoint.\n '
return pulumi.get(self, 'provisioning_state') | -2,609,549,406,412,615,000 | Provisioning state of the private endpoint. | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | provisioning_state | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'provisioning_state') |
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"\n '
return pulumi.get(self, 'type') | -5,449,551,391,296,740,000 | The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" | sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py | type | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'type') |
def p_bp_location(self, args):
'\n bp_start ::= opt_space location_if opt_space\n ' | -6,279,436,233,502,354,000 | bp_start ::= opt_space location_if opt_space | example/gdb-loc/gdbloc/parser.py | p_bp_location | rocky/python-spark | python | def p_bp_location(self, args):
'\n \n ' |
def p_asm_range(self, args):
"\n arange_start ::= opt_space arange\n arange ::= range\n arange ::= addr_location opt_space COMMA opt_space NUMBER\n arange ::= addr_location opt_space COMMA opt_space OFFSET\n arange ::= addr_location opt_space COMMA opt_space ADDRESS\n arange ::= location opt_space COMMA opt_space ADDRESS\n arange ::= addr_location opt_space COMMA\n arange ::= addr_location\n\n # Unlike ranges, We don't allow ending at an address\n # arange ::= COMMA opt_space addr_location\n\n addr_location ::= location\n addr_location ::= ADDRESS\n " | 6,546,008,999,439,895,000 | arange_start ::= opt_space arange
arange ::= range
arange ::= addr_location opt_space COMMA opt_space NUMBER
arange ::= addr_location opt_space COMMA opt_space OFFSET
arange ::= addr_location opt_space COMMA opt_space ADDRESS
arange ::= location opt_space COMMA opt_space ADDRESS
arange ::= addr_location opt_space COMMA
arange ::= addr_location
# Unlike ranges, We don't allow ending at an address
# arange ::= COMMA opt_space addr_location
addr_location ::= location
addr_location ::= ADDRESS | example/gdb-loc/gdbloc/parser.py | p_asm_range | rocky/python-spark | python | def p_asm_range(self, args):
"\n arange_start ::= opt_space arange\n arange ::= range\n arange ::= addr_location opt_space COMMA opt_space NUMBER\n arange ::= addr_location opt_space COMMA opt_space OFFSET\n arange ::= addr_location opt_space COMMA opt_space ADDRESS\n arange ::= location opt_space COMMA opt_space ADDRESS\n arange ::= addr_location opt_space COMMA\n arange ::= addr_location\n\n # Unlike ranges, We don't allow ending at an address\n # arange ::= COMMA opt_space addr_location\n\n addr_location ::= location\n addr_location ::= ADDRESS\n " |
def p_list_range(self, args):
'\n range_start ::= opt_space range\n\n range ::= location opt_space COMMA opt_space NUMBER\n range ::= location opt_space COMMA opt_space OFFSET\n range ::= COMMA opt_space location\n range ::= location opt_space COMMA\n range ::= location\n range ::= DIRECTION\n ' | -2,263,275,189,617,537,000 | range_start ::= opt_space range
range ::= location opt_space COMMA opt_space NUMBER
range ::= location opt_space COMMA opt_space OFFSET
range ::= COMMA opt_space location
range ::= location opt_space COMMA
range ::= location
range ::= DIRECTION | example/gdb-loc/gdbloc/parser.py | p_list_range | rocky/python-spark | python | def p_list_range(self, args):
'\n range_start ::= opt_space range\n\n range ::= location opt_space COMMA opt_space NUMBER\n range ::= location opt_space COMMA opt_space OFFSET\n range ::= COMMA opt_space location\n range ::= location opt_space COMMA\n range ::= location\n range ::= DIRECTION\n ' |
def p_location(self, args):
'\n opt_space ::= SPACE?\n\n location_if ::= location\n location_if ::= location SPACE IF tokens\n\n # Note no space is allowed between FILENAME and NUMBER\n location ::= FILENAME COLON NUMBER\n location ::= FUNCNAME\n\n # If just a number is given, the the filename is implied\n location ::= NUMBER\n location ::= METHOD\n location ::= OFFSET\n\n # For tokens we accept anything. Were really just\n # going to use the underlying string from the part\n # after "if". So below we all of the possible tokens\n\n tokens ::= token+\n token ::= COLON\n token ::= COMMA\n token ::= DIRECTION\n token ::= FILENAME\n token ::= FUNCNAME\n token ::= NUMBER\n token ::= OFFSET\n token ::= SPACE\n ' | 857,460,103,374,779,500 | opt_space ::= SPACE?
location_if ::= location
location_if ::= location SPACE IF tokens
# Note no space is allowed between FILENAME and NUMBER
location ::= FILENAME COLON NUMBER
location ::= FUNCNAME
# If just a number is given, the the filename is implied
location ::= NUMBER
location ::= METHOD
location ::= OFFSET
# For tokens we accept anything. Were really just
# going to use the underlying string from the part
# after "if". So below we all of the possible tokens
tokens ::= token+
token ::= COLON
token ::= COMMA
token ::= DIRECTION
token ::= FILENAME
token ::= FUNCNAME
token ::= NUMBER
token ::= OFFSET
token ::= SPACE | example/gdb-loc/gdbloc/parser.py | p_location | rocky/python-spark | python | def p_location(self, args):
'\n opt_space ::= SPACE?\n\n location_if ::= location\n location_if ::= location SPACE IF tokens\n\n # Note no space is allowed between FILENAME and NUMBER\n location ::= FILENAME COLON NUMBER\n location ::= FUNCNAME\n\n # If just a number is given, the the filename is implied\n location ::= NUMBER\n location ::= METHOD\n location ::= OFFSET\n\n # For tokens we accept anything. Were really just\n # going to use the underlying string from the part\n # after "if". So below we all of the possible tokens\n\n tokens ::= token+\n token ::= COLON\n token ::= COMMA\n token ::= DIRECTION\n token ::= FILENAME\n token ::= FUNCNAME\n token ::= NUMBER\n token ::= OFFSET\n token ::= SPACE\n ' |
def __init__(self, server_url: str, **kwargs) -> None:
'The WOQLClient constructor.\n\n Parameters\n ----------\n server_url : str\n URL of the server that this client will connect to.\n \\**kwargs\n Extra configuration options\n\n '
self.server_url = server_url.strip('/')
self.api = f'{self.server_url}/api'
self._connected = False
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None | -1,364,098,096,190,279,400 | The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options | terminusdb_client/woqlclient/woqlClient.py | __init__ | terminusdb/woql-client-p | python | def __init__(self, server_url: str, **kwargs) -> None:
'The WOQLClient constructor.\n\n Parameters\n ----------\n server_url : str\n URL of the server that this client will connect to.\n \\**kwargs\n Extra configuration options\n\n '
self.server_url = server_url.strip('/')
self.api = f'{self.server_url}/api'
self._connected = False
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None |
def connect(self, team: str='admin', db: Optional[str]=None, remote_auth: str=None, use_token: bool=False, jwt_token: Optional[str]=None, api_token: Optional[str]=None, key: str='root', user: str='admin', branch: str='main', ref: Optional[str]=None, repo: str='local', **kwargs) -> None:
'Connect to a Terminus server at the given URI with an API key.\n\n Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.\n\n Parameters\n ----------\n team: str\n Name of the team, default to be "admin"\n db: optional, str\n Name of the database connected\n remote_auth: optional, str\n Remote Auth setting\n key: optional, str\n API key for connecting, default to be "root"\n user: optional, str\n Name of the user, default to be "admin"\n use_token: bool\n Use token to connect. If both `jwt_token` and `api_token` is not provided (None), then it will use the ENV variable TERMINUSDB_ACCESS_TOKEN to connect as the API token\n jwt_token: optional, str\n The Bearer JWT token to connect. Default to be None.\n api_token: optional, strs\n The API token to connect. Default to be None.\n branch: optional, str\n Branch to be connected, default to be "main"\n ref: optional, str\n Ref setting\n repo: optional, str\n Local or remote repo, default to be "local"\n \\**kwargs\n Extra configuration options.\n\n Examples\n -------\n >>> client = WOQLClient("https://127.0.0.1:6363")\n >>> client.connect(key="root", team="admin", user="admin", db="example_db")\n '
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(_finish_response(requests.get((self.api + '/info'), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, auth=self._auth())))
except Exception as error:
raise InterfaceError(f'Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}') from None
if (self.db is not None):
try:
_finish_response(requests.head(self._db_url(), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, params={'exists': 'true'}, auth=self._auth()))
except DatabaseError:
raise InterfaceError(f'Connection fail, {self.db} does not exist.')
self._author = self.user | 8,423,099,675,081,320,000 | Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` is not provided (None), then it will use the ENV variable TERMINUSDB_ACCESS_TOKEN to connect as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, strs
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db") | terminusdb_client/woqlclient/woqlClient.py | connect | terminusdb/woql-client-p | python | def connect(self, team: str='admin', db: Optional[str]=None, remote_auth: str=None, use_token: bool=False, jwt_token: Optional[str]=None, api_token: Optional[str]=None, key: str='root', user: str='admin', branch: str='main', ref: Optional[str]=None, repo: str='local', **kwargs) -> None:
'Connect to a Terminus server at the given URI with an API key.\n\n Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.\n\n Parameters\n ----------\n team: str\n Name of the team, default to be "admin"\n db: optional, str\n Name of the database connected\n remote_auth: optional, str\n Remote Auth setting\n key: optional, str\n API key for connecting, default to be "root"\n user: optional, str\n Name of the user, default to be "admin"\n use_token: bool\n Use token to connect. If both `jwt_token` and `api_token` is not provided (None), then it will use the ENV variable TERMINUSDB_ACCESS_TOKEN to connect as the API token\n jwt_token: optional, str\n The Bearer JWT token to connect. Default to be None.\n api_token: optional, strs\n The API token to connect. Default to be None.\n branch: optional, str\n Branch to be connected, default to be "main"\n ref: optional, str\n Ref setting\n repo: optional, str\n Local or remote repo, default to be "local"\n \\**kwargs\n Extra configuration options.\n\n Examples\n -------\n >>> client = WOQLClient("https://127.0.0.1:6363")\n >>> client.connect(key="root", team="admin", user="admin", db="example_db")\n '
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(_finish_response(requests.get((self.api + '/info'), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, auth=self._auth())))
except Exception as error:
raise InterfaceError(f'Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}') from None
if (self.db is not None):
try:
_finish_response(requests.head(self._db_url(), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, params={'exists': 'true'}, auth=self._auth()))
except DatabaseError:
raise InterfaceError(f'Connection fail, {self.db} does not exist.')
self._author = self.user |
def close(self) -> None:
'Undo connect and close the connection.\n\n The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is call again.'
self._connected = False | -6,189,939,517,125,445,000 | Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is call again. | terminusdb_client/woqlclient/woqlClient.py | close | terminusdb/woql-client-p | python | def close(self) -> None:
'Undo connect and close the connection.\n\n The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is call again.'
self._connected = False |
def _check_connection(self, check_db=True) -> None:
'Raise connection InterfaceError if not connected\n Defaults to check if a db is connected'
if (not self._connected):
raise InterfaceError('Client is not connected to a TerminusDB server.')
if (check_db and (self.db is None)):
raise InterfaceError('No database is connected. Please either connect to a database or create a new database.') | 4,379,756,186,759,730,700 | Raise connection InterfaceError if not connected
Defaults to check if a db is connected | terminusdb_client/woqlclient/woqlClient.py | _check_connection | terminusdb/woql-client-p | python | def _check_connection(self, check_db=True) -> None:
'Raise connection InterfaceError if not connected\n Defaults to check if a db is connected'
if (not self._connected):
raise InterfaceError('Client is not connected to a TerminusDB server.')
if (check_db and (self.db is None)):
raise InterfaceError('No database is connected. Please either connect to a database or create a new database.') |
def get_commit_history(self, max_history: int=500) -> list:
'Get the whole commit history.\n Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the follow format:\n {"commit_id":\n {"author": "commit_author",\n "message": "commit_message",\n "timestamp: <datetime object of the timestamp>" }\n }\n\n Parameters\n ----------\n max_history: int, optional\n maximum number of commit that would return, counting backwards from your current commit. Default is set to 500. It need to be nop-negitive, if input is 0 it will still give the last commit.\n\n Example\n -------\n >>> from terminusdb_client import WOQLClient\n >>> client = WOQLClient("https://127.0.0.1:6363"\n >>> client.connect(db="bank_balance_example")\n >>> client.get_commit_history()\n [{\'commit\': \'s90wike9v5xibmrb661emxjs8k7ynwc\', \'author\': \'admin\', \'message\': \'Adding Jane\', \'timestamp\': datetime.da\n tetime(2020, 9, 3, 15, 29, 34)}, {\'commit\': \'1qhge8qlodajx93ovj67kvkrkxsw3pg\', \'author\': \'[email protected]\', \'m\n essage\': \'Adding Jim\', \'timestamp\': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {\'commit\': \'rciy1rfu5foj67ch00ow6f6n\n njjxe3i\', \'author\': \'[email protected]\', \'message\': \'Update mike\', \'timestamp\': datetime.datetime(2020, 9, 3, 15,\n 29, 33)}, {\'commit\': \'n4d86u8juzx852r2ekrega5hl838ovh\', \'author\': \'[email protected]\', \'message\': \'Add mike\', \'\n timestamp\': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {\'commit\': \'1vk2i8k8xce26p9jpi4zmq1h5vdqyuj\', \'author\': \'gav\n [email protected]\', \'message\': \'Label for balance was wrong\', \'timestamp\': datetime.datetime(2020, 9, 3, 15, 29, 33)\n }, {\'commit\': \'9si4na9zv2qol9b189y92fia7ac3hbg\', \'author\': \'[email protected]\', \'message\': \'Adding bank account\n object to schema\', \'timestamp\': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {\'commit\': \'9egc4h0m36l5rbq1alr1fki6jbfu\n kuv\', \'author\': \'TerminusDB\', \'message\': \'internal system operation\', \'timstamp\': datetime.datetime(2020, 9, 3, 15,\n 29, 33)}]\n\n Returns\n -------\n list\n '
if (max_history < 0):
raise ValueError('max_history needs to be non-negative.')
if (max_history > 1):
limit_history = (max_history - 1)
else:
limit_history = 1
woql_query = WOQLQuery().using('_commits').limit(limit_history).triple('v:branch', 'name', WOQLQuery().string(self.branch)).triple('v:branch', 'head', 'v:commit').path('v:commit', 'parent*', 'v:target_commit').triple('v:target_commit', 'identifier', 'v:cid').triple('v:target_commit', 'author', 'v:author').triple('v:target_commit', 'message', 'v:message').triple('v:target_commit', 'timestamp', 'v:timestamp')
result = self.query(woql_query).get('bindings')
if (not result):
return result
else:
result_list = []
for result_item in result:
result_list.append({'commit': result_item['cid']['@value'], 'author': result_item['author']['@value'], 'message': result_item['message']['@value'], 'timestamp': datetime.fromtimestamp(int(result_item['timestamp']['@value']))})
return result_list | 1,425,721,135,152,934,100 | Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the follow format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commit that would return, counting backwards from your current commit. Default is set to 500. It need to be nop-negitive, if input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': '[email protected]', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': '[email protected]', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': '[email protected]', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
[email protected]', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': '[email protected]', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timstamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list | terminusdb_client/woqlclient/woqlClient.py | get_commit_history | terminusdb/woql-client-p | python | def get_commit_history(self, max_history: int=500) -> list:
'Get the whole commit history.\n Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the follow format:\n {"commit_id":\n {"author": "commit_author",\n "message": "commit_message",\n "timestamp: <datetime object of the timestamp>" }\n }\n\n Parameters\n ----------\n max_history: int, optional\n maximum number of commit that would return, counting backwards from your current commit. Default is set to 500. It need to be nop-negitive, if input is 0 it will still give the last commit.\n\n Example\n -------\n >>> from terminusdb_client import WOQLClient\n >>> client = WOQLClient("https://127.0.0.1:6363"\n >>> client.connect(db="bank_balance_example")\n >>> client.get_commit_history()\n [{\'commit\': \'s90wike9v5xibmrb661emxjs8k7ynwc\', \'author\': \'admin\', \'message\': \'Adding Jane\', \'timestamp\': datetime.da\n tetime(2020, 9, 3, 15, 29, 34)}, {\'commit\': \'1qhge8qlodajx93ovj67kvkrkxsw3pg\', \'author\': \'[email protected]\', \'m\n essage\': \'Adding Jim\', \'timestamp\': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {\'commit\': \'rciy1rfu5foj67ch00ow6f6n\n njjxe3i\', \'author\': \'[email protected]\', \'message\': \'Update mike\', \'timestamp\': datetime.datetime(2020, 9, 3, 15,\n 29, 33)}, {\'commit\': \'n4d86u8juzx852r2ekrega5hl838ovh\', \'author\': \'[email protected]\', \'message\': \'Add mike\', \'\n timestamp\': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {\'commit\': \'1vk2i8k8xce26p9jpi4zmq1h5vdqyuj\', \'author\': \'gav\n [email protected]\', \'message\': \'Label for balance was wrong\', \'timestamp\': datetime.datetime(2020, 9, 3, 15, 29, 33)\n }, {\'commit\': \'9si4na9zv2qol9b189y92fia7ac3hbg\', \'author\': \'[email protected]\', \'message\': \'Adding bank account\n object to schema\', \'timestamp\': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {\'commit\': \'9egc4h0m36l5rbq1alr1fki6jbfu\n kuv\', \'author\': \'TerminusDB\', \'message\': \'internal system operation\', \'timstamp\': datetime.datetime(2020, 9, 3, 15,\n 29, 33)}]\n\n Returns\n -------\n list\n '
if (max_history < 0):
raise ValueError('max_history needs to be non-negative.')
if (max_history > 1):
limit_history = (max_history - 1)
else:
limit_history = 1
woql_query = WOQLQuery().using('_commits').limit(limit_history).triple('v:branch', 'name', WOQLQuery().string(self.branch)).triple('v:branch', 'head', 'v:commit').path('v:commit', 'parent*', 'v:target_commit').triple('v:target_commit', 'identifier', 'v:cid').triple('v:target_commit', 'author', 'v:author').triple('v:target_commit', 'message', 'v:message').triple('v:target_commit', 'timestamp', 'v:timestamp')
result = self.query(woql_query).get('bindings')
if (not result):
return result
else:
result_list = []
for result_item in result:
result_list.append({'commit': result_item['cid']['@value'], 'author': result_item['author']['@value'], 'message': result_item['message']['@value'], 'timestamp': datetime.fromtimestamp(int(result_item['timestamp']['@value']))})
return result_list |
def get_all_branches(self, get_data_version=False):
'Get all the branches available in the database.'
self._check_connection()
api_url = self._documents_url().split('/')
api_url = api_url[:(- 2)]
api_url = ('/'.join(api_url) + '/_commits')
result = requests.get(api_url, headers={'user-agent': f'terminusdb-client-python/{__version__}'}, params={'type': 'Branch'}, auth=self._auth())
if get_data_version:
(result, version) = _finish_response(result, get_data_version)
return (list(_result2stream(result)), version)
return list(_result2stream(_finish_response(result))) | -2,184,391,910,578,389,500 | Get all the branches available in the database. | terminusdb_client/woqlclient/woqlClient.py | get_all_branches | terminusdb/woql-client-p | python | def get_all_branches(self, get_data_version=False):
self._check_connection()
api_url = self._documents_url().split('/')
api_url = api_url[:(- 2)]
api_url = ('/'.join(api_url) + '/_commits')
result = requests.get(api_url, headers={'user-agent': f'terminusdb-client-python/{__version__}'}, params={'type': 'Branch'}, auth=self._auth())
if get_data_version:
(result, version) = _finish_response(result, get_data_version)
return (list(_result2stream(result)), version)
return list(_result2stream(_finish_response(result))) |
def rollback(self, steps=1) -> None:
"Curently not implementated. Please check back later.\n\n Raises\n ----------\n NotImplementedError\n Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset\n\n "
raise NotImplementedError('Open transactions are currently not supported. To reset commit head, check WOQLClient.reset') | -2,812,794,403,207,086,000 | Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset | terminusdb_client/woqlclient/woqlClient.py | rollback | terminusdb/woql-client-p | python | def rollback(self, steps=1) -> None:
"Curently not implementated. Please check back later.\n\n Raises\n ----------\n NotImplementedError\n Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset\n\n "
raise NotImplementedError('Open transactions are currently not supported. To reset commit head, check WOQLClient.reset') |
def copy(self) -> 'WOQLClient':
'Create a deep copy of this client.\n\n Returns\n -------\n WOQLClient\n The copied client instance.\n\n Examples\n --------\n >>> client = WOQLClient("https://127.0.0.1:6363/")\n >>> clone = client.copy()\n >>> assert client is not clone\n '
return copy.deepcopy(self) | -5,851,508,991,514,087,000 | Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone | terminusdb_client/woqlclient/woqlClient.py | copy | terminusdb/woql-client-p | python | def copy(self) -> 'WOQLClient':
'Create a deep copy of this client.\n\n Returns\n -------\n WOQLClient\n The copied client instance.\n\n Examples\n --------\n >>> client = WOQLClient("https://127.0.0.1:6363/")\n >>> clone = client.copy()\n >>> assert client is not clone\n '
return copy.deepcopy(self) |
def set_db(self, dbid: str, team: Optional[str]=None) -> str:
'Set the connection to another database. This will reset the connection.\n\n Parameters\n ----------\n dbid : str\n Database identifer to set in the config.\n team : str\n Team identifer to set in the config. If not passed in, it will use the current one.\n\n Returns\n -------\n str\n The current database identifier.\n\n Examples\n --------\n >>> client = WOQLClient("https://127.0.0.1:6363")\n >>> client.set_db("database1")\n \'database1\'\n '
self._check_connection(check_db=False)
if (team is None):
team = self.team
return self.connect(team=team, db=dbid, remote_auth=self._remote_auth, key=self._key, user=self.user, branch=self.branch, ref=self.ref, repo=self.repo) | 6,096,587,200,381,540,000 | Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifer to set in the config.
team : str
Team identifer to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1' | terminusdb_client/woqlclient/woqlClient.py | set_db | terminusdb/woql-client-p | python | def set_db(self, dbid: str, team: Optional[str]=None) -> str:
'Set the connection to another database. This will reset the connection.\n\n Parameters\n ----------\n dbid : str\n Database identifer to set in the config.\n team : str\n Team identifer to set in the config. If not passed in, it will use the current one.\n\n Returns\n -------\n str\n The current database identifier.\n\n Examples\n --------\n >>> client = WOQLClient("https://127.0.0.1:6363")\n >>> client.set_db("database1")\n \'database1\'\n '
self._check_connection(check_db=False)
if (team is None):
team = self.team
return self.connect(team=team, db=dbid, remote_auth=self._remote_auth, key=self._key, user=self.user, branch=self.branch, ref=self.ref, repo=self.repo) |
def resource(self, ttype: ResourceType, val: Optional[str]=None) -> str:
'Create a resource identifier string based on the current config.\n\n Parameters\n ----------\n ttype : ResourceType\n Type of resource.\n val : str, optional\n Branch or commit identifier.\n\n Returns\n -------\n str\n The constructed resource string.\n\n Examples\n --------\n >>> client = WOQLClient("https://127.0.0.1:6363")\n >>> client.resource(ResourceType.DB)\n \'<team>/<db>/\'\n >>> client.resource(ResourceType.META)\n \'<team>/<db>/_meta\'\n >>> client.resource(ResourceType.COMMITS)\n \'<team>/<db>/<repo>/_commits\'\n >>> client.resource(ResourceType.REF, "<reference>")\n \'<team>/<db>/<repo>/commit/<reference>\'\n >>> client.resource(ResourceType.BRANCH, "<branch>")\n \'<team>/<db>/<repo>/branch/<branch>\'\n '
base = (((self.team + '/') + self.db) + '/')
ref_value = (val if val else self.ref)
branch_value = (val if val else self.branch)
urls = {ResourceType.DB: base, ResourceType.META: f'{base}_meta', ResourceType.REPO: f'{base}{self.repo}/_meta', ResourceType.COMMITS: f'{base}{self.repo}/_commits', ResourceType.REF: f'{base}{self.repo}/commit/{ref_value}', ResourceType.BRANCH: f'{base}{self.repo}/{branch_value}'}
return urls[ttype] | 930,136,883,031,564,500 | Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>' | terminusdb_client/woqlclient/woqlClient.py | resource | terminusdb/woql-client-p | python | def resource(self, ttype: ResourceType, val: Optional[str]=None) -> str:
'Create a resource identifier string based on the current config.\n\n Parameters\n ----------\n ttype : ResourceType\n Type of resource.\n val : str, optional\n Branch or commit identifier.\n\n Returns\n -------\n str\n The constructed resource string.\n\n Examples\n --------\n >>> client = WOQLClient("https://127.0.0.1:6363")\n >>> client.resource(ResourceType.DB)\n \'<team>/<db>/\'\n >>> client.resource(ResourceType.META)\n \'<team>/<db>/_meta\'\n >>> client.resource(ResourceType.COMMITS)\n \'<team>/<db>/<repo>/_commits\'\n >>> client.resource(ResourceType.REF, "<reference>")\n \'<team>/<db>/<repo>/commit/<reference>\'\n >>> client.resource(ResourceType.BRANCH, "<branch>")\n \'<team>/<db>/<repo>/branch/<branch>\'\n '
base = (((self.team + '/') + self.db) + '/')
ref_value = (val if val else self.ref)
branch_value = (val if val else self.branch)
urls = {ResourceType.DB: base, ResourceType.META: f'{base}_meta', ResourceType.REPO: f'{base}{self.repo}/_meta', ResourceType.COMMITS: f'{base}{self.repo}/_commits', ResourceType.REF: f'{base}{self.repo}/commit/{ref_value}', ResourceType.BRANCH: f'{base}{self.repo}/{branch_value}'}
return urls[ttype] |
def _get_prefixes(self):
'Get the prefixes for a given database'
self._check_connection()
result = requests.get(self._db_base('prefixes'), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, auth=self._auth())
return json.loads(_finish_response(result)) | 5,538,847,128,667,846,000 | Get the prefixes for a given database | terminusdb_client/woqlclient/woqlClient.py | _get_prefixes | terminusdb/woql-client-p | python | def _get_prefixes(self):
self._check_connection()
result = requests.get(self._db_base('prefixes'), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, auth=self._auth())
return json.loads(_finish_response(result)) |
def create_database(self, dbid: str, team: Optional[str]=None, label: Optional[str]=None, description: Optional[str]=None, prefixes: Optional[dict]=None, include_schema: bool=True) -> None:
'Create a TerminusDB database by posting\n a terminus:Database document to the Terminus Server.\n\n Parameters\n ----------\n dbid : str\n Unique identifier of the database.\n team : str, optional\n ID of the Team in which to create the DB (defaults to \'admin\')\n label : str, optional\n Database name.\n description : str, optional\n Database description.\n prefixes : dict, optional\n Optional dict containing ``"@base"`` and ``"@schema"`` keys.\n\n @base (str)\n IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.\n @schema (str)\n IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.\n include_schema : bool\n If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a server\n\n Examples\n --------\n >>> client = WOQLClient("https://127.0.0.1:6363/")\n >>> client.create_database("someDB", "admin", "Database Label", "My Description")\n '
self._check_connection(check_db=False)
details: Dict[(str, Any)] = {}
if label:
details['label'] = label
else:
details['label'] = dbid
if description:
details['comment'] = description
else:
details['comment'] = ''
if include_schema:
details['schema'] = True
if prefixes:
details['prefixes'] = prefixes
if (team is None):
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(requests.post(self._db_url(), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, json=details, auth=self._auth())) | 3,640,779,118,710,737,000 | Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description") | terminusdb_client/woqlclient/woqlClient.py | create_database | terminusdb/woql-client-p | python | def create_database(self, dbid: str, team: Optional[str]=None, label: Optional[str]=None, description: Optional[str]=None, prefixes: Optional[dict]=None, include_schema: bool=True) -> None:
'Create a TerminusDB database by posting\n a terminus:Database document to the Terminus Server.\n\n Parameters\n ----------\n dbid : str\n Unique identifier of the database.\n team : str, optional\n ID of the Team in which to create the DB (defaults to \'admin\')\n label : str, optional\n Database name.\n description : str, optional\n Database description.\n prefixes : dict, optional\n Optional dict containing ``"@base"`` and ``"@schema"`` keys.\n\n @base (str)\n IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.\n @schema (str)\n IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.\n include_schema : bool\n If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a server\n\n Examples\n --------\n >>> client = WOQLClient("https://127.0.0.1:6363/")\n >>> client.create_database("someDB", "admin", "Database Label", "My Description")\n '
self._check_connection(check_db=False)
details: Dict[(str, Any)] = {}
if label:
details['label'] = label
else:
details['label'] = dbid
if description:
details['comment'] = description
else:
details['comment'] =
if include_schema:
details['schema'] = True
if prefixes:
details['prefixes'] = prefixes
if (team is None):
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(requests.post(self._db_url(), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, json=details, auth=self._auth())) |
def delete_database(self, dbid: Optional[str]=None, team: Optional[str]=None, force: bool=False) -> None:
'Delete a TerminusDB database.\n\n If ``team`` is provided, then the team in the config will be updated\n and the new value will be used in future requests to the server.\n\n Parameters\n ----------\n dbid : str\n ID of the database to delete\n team : str, optional\n the team in which the database resides (defaults to "admin")\n force: bool\n\n Raises\n ------\n UserWarning\n If the value of dbid is None.\n InterfaceError\n if the client does not connect to a server.\n\n Examples\n -------\n >>> client = WOQLClient("https://127.0.0.1:6363/")\n >>> client.delete_database("<database>", "<team>")\n '
self._check_connection(check_db=False)
if (dbid is None):
raise UserWarning(f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead.")
self.db = dbid
if (team is None):
warnings.warn(f'Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}')
else:
self.team = team
payload = {'force': force}
_finish_response(requests.delete(self._db_url(), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, auth=self._auth(), params=payload))
self.db = None | -4,838,731,874,326,968,000 | Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>") | terminusdb_client/woqlclient/woqlClient.py | delete_database | terminusdb/woql-client-p | python | def delete_database(self, dbid: Optional[str]=None, team: Optional[str]=None, force: bool=False) -> None:
'Delete a TerminusDB database.\n\n If ``team`` is provided, then the team in the config will be updated\n and the new value will be used in future requests to the server.\n\n Parameters\n ----------\n dbid : str\n ID of the database to delete\n team : str, optional\n the team in which the database resides (defaults to "admin")\n force: bool\n\n Raises\n ------\n UserWarning\n If the value of dbid is None.\n InterfaceError\n if the client does not connect to a server.\n\n Examples\n -------\n >>> client = WOQLClient("https://127.0.0.1:6363/")\n >>> client.delete_database("<database>", "<team>")\n '
self._check_connection(check_db=False)
if (dbid is None):
raise UserWarning(f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead.")
self.db = dbid
if (team is None):
warnings.warn(f'Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}')
else:
self.team = team
payload = {'force': force}
_finish_response(requests.delete(self._db_url(), headers={'user-agent': f'terminusdb-client-python/{__version__}'}, auth=self._auth(), params=payload))
self.db = None |