body (string, 26 to 98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (1 class: python) | body_without_docstring (string, 20 to 98.2k chars) |
---|---|---|---|---|---|---|---|
def inverse_deriv(self, z):
"\n Derivative of the inverse of the Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The derivative of the inverse of the LogLog link function\n "
return np.exp(((- np.exp((- z))) - z)) | 1,061,430,631,846,378,800 | Derivative of the inverse of the Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The derivative of the inverse of the LogLog link function | statsmodels/genmod/families/links.py | inverse_deriv | BioGeneTools/statsmodels | python | def inverse_deriv(self, z):
"\n Derivative of the inverse of the Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The derivative of the inverse of the LogLog link function\n "
return np.exp(((- np.exp((- z))) - z)) |
def inverse_deriv2(self, z):
"\n Second derivative of the inverse of the Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n g^(-1)''(z) : ndarray\n The second derivative of the inverse of the LogLog link function\n "
return (self.inverse_deriv(z) * (np.exp((- z)) - 1)) | -7,096,429,890,514,333,000 | Second derivative of the inverse of the Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
g^(-1)''(z) : ndarray
The second derivative of the inverse of the LogLog link function | statsmodels/genmod/families/links.py | inverse_deriv2 | BioGeneTools/statsmodels | python | def inverse_deriv2(self, z):
"\n Second derivative of the inverse of the Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n g^(-1)(z) : ndarray\n The second derivative of the inverse of the LogLog link function\n "
return (self.inverse_deriv(z) * (np.exp((- z)) - 1)) |
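A quick numerical sanity check of the two inverse derivatives above (a hedged sketch using only numpy; `loglog_inverse` is the standard g^(-1)(z) = exp(-exp(-z)), consistent with the closed forms in the snippets):

```python
import numpy as np

def loglog_inverse(z):
    # g^(-1)(z) = exp(-exp(-z)) for the Log-Log link
    return np.exp(-np.exp(-z))

def loglog_inverse_deriv(z):
    # closed form from inverse_deriv above
    return np.exp(-np.exp(-z) - z)

z = np.linspace(-1.0, 2.0, 7)
h = 1e-6
# central finite differences against the analytic first and second derivatives
num1 = (loglog_inverse(z + h) - loglog_inverse(z - h)) / (2 * h)
num2 = (loglog_inverse_deriv(z + h) - loglog_inverse_deriv(z - h)) / (2 * h)
assert np.allclose(num1, loglog_inverse_deriv(z), atol=1e-6)
assert np.allclose(num2, loglog_inverse_deriv(z) * (np.exp(-z) - 1), atol=1e-6)
```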
def __call__(self, p):
'\n Negative Binomial transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The negative binomial transform of `p`\n\n Notes\n -----\n g(p) = log(p/(p + 1/alpha))\n '
p = self._clean(p)
return np.log((p / (p + (1 / self.alpha)))) | 5,409,394,703,314,850,000 | Negative Binomial transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : ndarray
The negative binomial transform of `p`
Notes
-----
g(p) = log(p/(p + 1/alpha)) | statsmodels/genmod/families/links.py | __call__ | BioGeneTools/statsmodels | python | def __call__(self, p):
'\n Negative Binomial transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The negative binomial transform of `p`\n\n Notes\n -----\n g(p) = log(p/(p + 1/alpha))\n '
p = self._clean(p)
return np.log((p / (p + (1 / self.alpha)))) |
def inverse(self, z):
'\n Inverse of the negative binomial transform\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the negative binomial link at `p`.\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))\n '
return ((- 1) / (self.alpha * (1 - np.exp((- z))))) | -2,830,177,018,432,326,700 | Inverse of the negative binomial transform
Parameters
----------
z : array_like
The value of the inverse of the negative binomial link at `p`.
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(z) = exp(z)/(alpha*(1-exp(z))) | statsmodels/genmod/families/links.py | inverse | BioGeneTools/statsmodels | python | def inverse(self, z):
'\n Inverse of the negative binomial transform\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the negative binomial link at `p`.\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))\n '
return ((- 1) / (self.alpha * (1 - np.exp((- z))))) |
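A round-trip check that `inverse` really inverts the `__call__` transform above (a minimal sketch with a fixed, assumed alpha; note the code's -1/(alpha*(1-exp(-z))) is algebraically the same as the docstring's exp(z)/(alpha*(1-exp(z)))):

```python
import numpy as np

alpha = 1.0  # assumed dispersion parameter for the check

def nb_link(p):
    # g(p) = log(p / (p + 1/alpha)), as in __call__ above
    return np.log(p / (p + 1.0 / alpha))

def nb_inverse(z):
    # g^(-1)(z) = -1 / (alpha * (1 - exp(-z))), as in inverse above
    return -1.0 / (alpha * (1.0 - np.exp(-z)))

p = np.array([0.1, 1.0, 5.0, 25.0])
assert np.allclose(nb_inverse(nb_link(p)), p)
```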
def deriv(self, p):
"\n Derivative of the negative binomial transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the negative binomial transform link function\n\n Notes\n -----\n g'(x) = 1/(x+alpha*x^2)\n "
return (1 / (p + (self.alpha * (p ** 2)))) | -6,867,509,968,575,642,000 | Derivative of the negative binomial transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the negative binomial transform link function
Notes
-----
g'(x) = 1/(x+alpha*x^2) | statsmodels/genmod/families/links.py | deriv | BioGeneTools/statsmodels | python | def deriv(self, p):
"\n Derivative of the negative binomial transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the negative binomial transform link function\n\n Notes\n -----\n g'(x) = 1/(x+alpha*x^2)\n "
return (1 / (p + (self.alpha * (p ** 2)))) |
def deriv2(self, p):
"\n Second derivative of the negative binomial link function.\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n The second derivative of the negative binomial transform link\n function\n\n Notes\n -----\n g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2\n "
numer = (- (1 + ((2 * self.alpha) * p)))
denom = ((p + (self.alpha * (p ** 2))) ** 2)
return (numer / denom) | 521,529,222,361,369,200 | Second derivative of the negative binomial link function.
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the negative binomial transform link
function
Notes
-----
g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2 | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n Second derivative of the negative binomial link function.\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g(p) : ndarray\n The second derivative of the negative binomial transform link\n function\n\n Notes\n -----\n g(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2\n "
numer = (- (1 + ((2 * self.alpha) * p)))
denom = ((p + (self.alpha * (p ** 2))) ** 2)
return (numer / denom) |
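The first and second link derivatives above can be verified against central finite differences (a sketch, again with a fixed, assumed alpha):

```python
import numpy as np

alpha, h = 0.5, 1e-6
p = np.array([0.5, 1.0, 3.0])

g = lambda x: np.log(x / (x + 1.0 / alpha))                      # the link
g1 = lambda x: 1.0 / (x + alpha * x ** 2)                        # deriv above
g2 = lambda x: -(1 + 2 * alpha * x) / (x + alpha * x ** 2) ** 2  # deriv2 above

assert np.allclose((g(p + h) - g(p - h)) / (2 * h), g1(p), atol=1e-5)
assert np.allclose((g1(p + h) - g1(p - h)) / (2 * h), g2(p), atol=1e-4)
```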
def inverse_deriv(self, z):
"\n Derivative of the inverse of the negative binomial transform\n\n Parameters\n ----------\n z : array_like\n Usually the linear predictor for a GLM or GEE model\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the negative\n binomial link\n "
t = np.exp(z)
return (t / (self.alpha * ((1 - t) ** 2))) | -1,360,131,057,683,691,300 | Derivative of the inverse of the negative binomial transform
Parameters
----------
z : array_like
Usually the linear predictor for a GLM or GEE model
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the negative
binomial link | statsmodels/genmod/families/links.py | inverse_deriv | BioGeneTools/statsmodels | python | def inverse_deriv(self, z):
"\n Derivative of the inverse of the negative binomial transform\n\n Parameters\n ----------\n z : array_like\n Usually the linear predictor for a GLM or GEE model\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the negative\n binomial link\n "
t = np.exp(z)
return (t / (self.alpha * ((1 - t) ** 2))) |
def _default_hashfunc(content, hashbits):
"\n Default hash function is variable-length version of Python's builtin hash.\n\n :param content: data that needs to hash.\n :return: return a decimal number.\n "
if (content == ''):
return 0
x = (ord(content[0]) << 7)
m = 1000003
mask = ((2 ** hashbits) - 1)
for c in content:
x = (((x * m) ^ ord(c)) & mask)
x ^= len(content)
if (x == (- 1)):
x = (- 2)
return x | 2,345,190,079,828,529,700 | Default hash function is a variable-length version of Python's builtin hash.
:param content: data that needs to be hashed.
:return: a decimal number. | algorithms/hash/simhash.py | _default_hashfunc | SylvanasSun/code-snippets | python | def _default_hashfunc(content, hashbits):
"\n Default hash function is variable-length version of Python's builtin hash.\n\n :param content: data that needs to hash.\n :return: return a decimal number.\n "
if (content == ''):
return 0
x = (ord(content[0]) << 7)
m = 1000003
mask = ((2 ** hashbits) - 1)
for c in content:
x = (((x * m) ^ ord(c)) & mask)
x ^= len(content)
if (x == (- 1)):
x = (- 2)
return x |
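Usage is straightforward; a small sketch, assuming `_default_hashfunc` from the row above is in scope (the result is masked to the requested bit width, so it always fits in `hashbits` bits):

```python
h1 = _default_hashfunc("hello world", 64)
h2 = _default_hashfunc("hello worlds", 64)
assert 0 <= h1 < 2 ** 64  # masked to 64 bits
print(hex(h1), hex(h2))   # different inputs almost always give different fingerprints
```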
def _default_tokenizer_func(content, keyword_weight_pair):
"\n Default tokenizer function that uses jieba tokenizer.\n\n :param keyword_weight_pair: maximum pair number of the keyword-weight list.\n :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...].\n "
seg_list = jieba.lcut_for_search(content)
return jieba.analyse.extract_tags(''.join(seg_list), topK=keyword_weight_pair, withWeight=True) | 5,208,231,525,523,260,000 | Default tokenizer function that uses jieba tokenizer.
:param keyword_weight_pair: maximum pair number of the keyword-weight list.
:return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. | algorithms/hash/simhash.py | _default_tokenizer_func | SylvanasSun/code-snippets | python | def _default_tokenizer_func(content, keyword_weight_pair):
"\n Default tokenizer function that uses jieba tokenizer.\n\n :param keyword_weight_pair: maximum pair number of the keyword-weight list.\n :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...].\n "
seg_list = jieba.lcut_for_search(content)
return jieba.analyse.extract_tags(''.join(seg_list), topK=keyword_weight_pair, withWeight=True) |
def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None):
'\n        :param data: data that needs to be encoded.\n        :param keyword_weight_pair: maximum pair number of the keyword-weight list.\n        :param hash_bit_number: maximum bit number for hashcode.\n        :param hashfunc: hash function; its first parameter must be data that needs to be encoded\n                         and the second parameter must be hash bit number.\n\n        :param tokenizer_func: tokenizer function; its first parameter must be content that\n                               needs to be tokenized and the second parameter must be\n                               keyword_weight_pair.\n        '
if (hashfunc is None):
self.hashfunc = _default_hashfunc
else:
self.hashfunc = hashfunc
if (tokenizer_func is None):
self.tokenizer_func = _default_tokenizer_func
else:
self.tokenizer_func = tokenizer_func
self.hash_bit_number = hash_bit_number
self.keyword_weight_pari = keyword_weight_pair
if isinstance(data, Simhash):
self.hash = data.hash
elif isinstance(data, int):
self.hash = data
else:
self.simhash(data) | 6,896,240,115,283,153,000 | :param data: data that needs to be encoded.
:param keyword_weight_pair: maximum pair number of the keyword-weight list.
:param hash_bit_number: maximum bit number for hashcode.
:param hashfunc: hash function; its first parameter must be data that needs to be encoded
and the second parameter must be hash bit number.
:param tokenizer_func: tokenizer function; its first parameter must be content that
needs to be tokenized and the second parameter must be
keyword_weight_pair. | algorithms/hash/simhash.py | __init__ | SylvanasSun/code-snippets | python | def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None):
'\n        :param data: data that needs to be encoded.\n        :param keyword_weight_pair: maximum pair number of the keyword-weight list.\n        :param hash_bit_number: maximum bit number for hashcode.\n        :param hashfunc: hash function; its first parameter must be data that needs to be encoded\n                         and the second parameter must be hash bit number.\n\n        :param tokenizer_func: tokenizer function; its first parameter must be content that\n                               needs to be tokenized and the second parameter must be\n                               keyword_weight_pair.\n        '
if (hashfunc is None):
self.hashfunc = _default_hashfunc
else:
self.hashfunc = hashfunc
if (tokenizer_func is None):
self.tokenizer_func = _default_tokenizer_func
else:
self.tokenizer_func = tokenizer_func
self.hash_bit_number = hash_bit_number
self.keyword_weight_pari = keyword_weight_pair
if isinstance(data, Simhash):
self.hash = data.hash
elif isinstance(data, int):
self.hash = data
else:
self.simhash(data) |
def simhash(self, content):
'\n Select policies for simhash on the different types of content.\n '
if (content is None):
self.hash = (- 1)
return
if isinstance(content, str):
features = self.tokenizer_func(content, self.keyword_weight_pari)
self.hash = self.build_from_features(features)
elif isinstance(content, collections.Iterable):
self.hash = self.build_from_features(content)
elif isinstance(content, int):
self.hash = content
else:
raise Exception(('Unsupported parameter type %s' % type(content))) | 358,083,546,286,196,860 | Select policies for simhash on the different types of content. | algorithms/hash/simhash.py | simhash | SylvanasSun/code-snippets | python | def simhash(self, content):
'\n \n '
if (content is None):
self.hash = (- 1)
return
if isinstance(content, str):
features = self.tokenizer_func(content, self.keyword_weight_pari)
self.hash = self.build_from_features(features)
elif isinstance(content, collections.Iterable):
self.hash = self.build_from_features(content)
elif isinstance(content, int):
self.hash = content
else:
raise Exception(('Unsupported parameter type %s' % type(content))) |
def build_from_features(self, features):
'\n        :param features: a list of (token,weight) tuples or a token -> weight dict;\n                         if a feature is a plain string, a weight of 1 will be assumed.\n\n        :return: a decimal fingerprint accumulated over the handled feature-weight pairs.\n        '
v = ([0] * self.hash_bit_number)
if isinstance(features, dict):
features = features.items()
for f in features:
if isinstance(f, str):
h = self.hashfunc(f, self.hash_bit_number)
w = 1
else:
assert isinstance(f, collections.Iterable)
h = self.hashfunc(f[0], self.hash_bit_number)
w = f[1]
for i in range(self.hash_bit_number):
bitmask = (1 << i)
v[i] += (w if (h & bitmask) else (- w))
fingerprint = 0
for i in range(self.hash_bit_number):
if (v[i] >= 0):
fingerprint += (1 << i)
return fingerprint | 3,623,918,119,554,579,500 | :param features: a list of (token,weight) tuples or a token -> weight dict;
if a feature is a plain string, a weight of 1 will be assumed.
:return: a decimal fingerprint accumulated over the handled feature-weight pairs. | algorithms/hash/simhash.py | build_from_features | SylvanasSun/code-snippets | python | def build_from_features(self, features):
'\n        :param features: a list of (token,weight) tuples or a token -> weight dict;\n                         if a feature is a plain string, a weight of 1 will be assumed.\n\n        :return: a decimal fingerprint accumulated over the handled feature-weight pairs.\n        '
v = ([0] * self.hash_bit_number)
if isinstance(features, dict):
features = features.items()
for f in features:
if isinstance(f, str):
h = self.hashfunc(f, self.hash_bit_number)
w = 1
else:
assert isinstance(f, collections.Iterable)
h = self.hashfunc(f[0], self.hash_bit_number)
w = f[1]
for i in range(self.hash_bit_number):
bitmask = (1 << i)
v[i] += (w if (h & bitmask) else (- w))
fingerprint = 0
for i in range(self.hash_bit_number):
if (v[i] >= 0):
fingerprint += (1 << i)
return fingerprint |
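The bit-voting scheme inside `build_from_features` can be sketched standalone (a simplified version using Python's built-in `hash` instead of the configurable `hashfunc`; note the builtin string hash is randomized per process unless PYTHONHASHSEED is set):

```python
def fingerprint(features, bits=64):
    """Weighted bit voting: each feature adds +w or -w per bit of its hash."""
    v = [0] * bits
    for token, weight in features:
        h = hash(token) & ((1 << bits) - 1)  # mask to `bits` bits
        for i in range(bits):
            v[i] += weight if (h >> i) & 1 else -weight
    # bits whose vote total is non-negative become 1 in the fingerprint
    return sum(1 << i for i in range(bits) if v[i] >= 0)

print(hex(fingerprint([("example", 0.45), ("hello", 0.26)])))
```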
def is_equal(self, another, limit=0.8):
'\n        Determine whether two simhashes are similar.\n\n        :param another: another simhash.\n        :param limit: a limit of the similarity.\n        :return: True if the similarity is greater than the limit, else False.\n        '
if (another is None):
raise Exception('Parameter another is null')
if isinstance(another, int):
distance = self.hamming_distance(another)
elif isinstance(another, Simhash):
assert (self.hash_bit_number == another.hash_bit_number)
distance = self.hamming_distance(another.hash)
else:
raise Exception(('Unsupported parameter type %s' % type(another)))
similarity = (float((self.hash_bit_number - distance)) / self.hash_bit_number)
if (similarity > limit):
return True
return False | -145,368,186,127,737,300 | Determine whether two simhashes are similar.
:param another: another simhash.
:param limit: a limit of the similarity.
:return: True if the similarity is greater than the limit, else False. | algorithms/hash/simhash.py | is_equal | SylvanasSun/code-snippets | python | def is_equal(self, another, limit=0.8):
'\n        Determine whether two simhashes are similar.\n\n        :param another: another simhash.\n        :param limit: a limit of the similarity.\n        :return: True if the similarity is greater than the limit, else False.\n        '
if (another is None):
raise Exception('Parameter another is null')
if isinstance(another, int):
distance = self.hamming_distance(another)
elif isinstance(another, Simhash):
assert (self.hash_bit_number == another.hash_bit_number)
distance = self.hamming_distance(another.hash)
else:
raise Exception(('Unsupported parameter type %s' % type(another)))
similarity = (float((self.hash_bit_number - distance)) / self.hash_bit_number)
if (similarity > limit):
return True
return False |
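The similarity rule is (bits - hamming_distance) / bits compared against the limit; a tiny worked example with two 64-bit fingerprints differing in 5 bits:

```python
bits = 64
fp_a = 0b1011 << 60     # toy fingerprint
fp_b = fp_a ^ 0b11111   # flip the 5 lowest bits
distance = bin(fp_a ^ fp_b).count("1")
similarity = (bits - distance) / bits
print(distance, similarity, similarity > 0.8)  # 5 0.921875 True
```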
def hamming_distance(self, another):
'\n        Compute the Hamming distance, i.e. the total number of differing bits between two binary numbers.\n\n        :param another: another simhash value.\n        :return: the Hamming distance between the current simhash and another simhash.\n        '
x = ((self.hash ^ another) & ((1 << self.hash_bit_number) - 1))
result = 0
while x:
result += 1
x &= (x - 1)
return result | 4,441,790,304,206,754,300 | Compute the Hamming distance, i.e. the total number of differing bits between two binary numbers.
:param another: another simhash value.
:return: the Hamming distance between the current simhash and another simhash. | algorithms/hash/simhash.py | hamming_distance | SylvanasSun/code-snippets | python | def hamming_distance(self, another):
'\n        Compute the Hamming distance, i.e. the total number of differing bits between two binary numbers.\n\n        :param another: another simhash value.\n        :return: the Hamming distance between the current simhash and another simhash.\n        '
x = ((self.hash ^ another) & ((1 << self.hash_bit_number) - 1))
result = 0
while x:
result += 1
x &= (x - 1)
return result |
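The while loop is Kernighan's popcount: `x &= x - 1` clears the lowest set bit, so it iterates once per set bit. A standalone equivalence check against `bin(x).count('1')`:

```python
def popcount(x):
    count = 0
    while x:
        x &= x - 1  # clear the lowest set bit
        count += 1
    return count

for x in (0, 1, 0b1011, 2 ** 64 - 1):
    assert popcount(x) == bin(x).count("1")
```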
def relpath(self, current_file, rel_path):
'\n Compute path given current file and relative path.\n '
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path | -5,430,039,140,372,359,000 | Compute path given current file and relative path. | luigi/contrib/scalding.py | relpath | Ali-Tahir/luigi | python | def relpath(self, current_file, rel_path):
'\n \n '
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path |
def source(self):
'\n Path to the scala source for this Scalding Job\n\n Either one of source() or jar() must be specified.\n '
return None | -3,100,607,564,920,193,500 | Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified. | luigi/contrib/scalding.py | source | Ali-Tahir/luigi | python | def source(self):
'\n Path to the scala source for this Scalding Job\n\n Either one of source() or jar() must be specified.\n '
return None |
def jar(self):
'\n Path to the jar file for this Scalding Job\n\n Either one of source() or jar() must be specified.\n '
return None | -6,554,746,075,960,280,000 | Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified. | luigi/contrib/scalding.py | jar | Ali-Tahir/luigi | python | def jar(self):
'\n Path to the jar file for this Scalding Job\n\n Either one of source() or jar() must be specified.\n '
return None |
def extra_jars(self):
'\n Extra jars for building and running this Scalding Job.\n '
return [] | -6,212,587,920,033,463,000 | Extra jars for building and running this Scalding Job. | luigi/contrib/scalding.py | extra_jars | Ali-Tahir/luigi | python | def extra_jars(self):
'\n \n '
return [] |
def job_class(self):
'\n optional main job class for this Scalding Job.\n '
return None | 4,452,208,310,207,736,300 | optional main job class for this Scalding Job. | luigi/contrib/scalding.py | job_class | Ali-Tahir/luigi | python | def job_class(self):
'\n \n '
return None |
def atomic_output(self):
'\n If True, then rewrite output arguments to be temp locations and\n atomically move them into place after the job finishes.\n '
return True | 5,549,941,568,464,626,000 | If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes. | luigi/contrib/scalding.py | atomic_output | Ali-Tahir/luigi | python | def atomic_output(self):
'\n If True, then rewrite output arguments to be temp locations and\n atomically move them into place after the job finishes.\n '
return True |
def job_args(self):
'\n Extra arguments to pass to the Scalding job.\n '
return [] | 7,189,867,044,952,383,000 | Extra arguments to pass to the Scalding job. | luigi/contrib/scalding.py | job_args | Ali-Tahir/luigi | python | def job_args(self):
'\n \n '
return [] |
def args(self):
'\n Returns an array of args to pass to the job.\n '
arglist = []
for (k, v) in six.iteritems(self.requires_hadoop()):
arglist.append(('--' + k))
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist | -5,758,166,138,721,626,000 | Returns an array of args to pass to the job. | luigi/contrib/scalding.py | args | Ali-Tahir/luigi | python | def args(self):
'\n \n '
arglist = []
for (k, v) in six.iteritems(self.requires_hadoop()):
arglist.append(('--' + k))
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist |
def migrate():
' apply yoyo migrations '
logger.info('Migrating to the latest schema')
log.getLogger('yoyo').setLevel(log.DEBUG)
backend = get_backend(('sqlite:///' + DB_PATH))
migrations = read_migrations('./migrations')
with backend.lock():
backend.apply_migrations(backend.to_apply(migrations)) | 5,327,263,784,229,965,000 | apply yoyo migrations | src/app/fs.py | migrate | ratijas/multi_vote_bot | python | def migrate():
' '
logger.info('Migrating to the latest schema')
log.getLogger('yoyo').setLevel(log.DEBUG)
backend = get_backend(('sqlite:///' + DB_PATH))
migrations = read_migrations('./migrations')
with backend.lock():
backend.apply_migrations(backend.to_apply(migrations)) |
def setName(self, name=None):
' Set an individual name for the (sub) test. '
if (name != None):
self.name = name
else:
self.name = self.testName | 8,183,793,640,460,031,000 | Set an individual name for the (sub) test. | ctsimu/test.py | setName | BAMresearch/ctsimu-toolbox | python | def setName(self, name=None):
' '
if (name != None):
self.name = name
else:
self.name = self.testName |
def setResultFileDirectory(self, resultFileDirectory='.'):
' Set the location where test results should be saved. '
self.resultFileDirectory = resultFileDirectory
touchDirectory(self.resultFileDirectory) | 2,513,719,743,405,659,600 | Set the location where test results should be saved. | ctsimu/test.py | setResultFileDirectory | BAMresearch/ctsimu-toolbox | python | def setResultFileDirectory(self, resultFileDirectory='.'):
' '
self.resultFileDirectory = resultFileDirectory
touchDirectory(self.resultFileDirectory) |
def setRawOutput(self, rawOutput=False):
' Save intermediate projections as RAW instead of TIFF? '
self.rawOutput = rawOutput | 1,851,281,245,773,209,000 | Save intermediate projections as RAW instead of TIFF? | ctsimu/test.py | setRawOutput | BAMresearch/ctsimu-toolbox | python | def setRawOutput(self, rawOutput=False):
' '
self.rawOutput = rawOutput |
def plotResults(self):
' Plot results of evaluation. '
pass | -7,920,522,756,913,202,000 | Plot results of evaluation. | ctsimu/test.py | plotResults | BAMresearch/ctsimu-toolbox | python | def plotResults(self):
' '
pass |
def process_20_newsgroups(*, extract_dir='20_newsgroups', metadata=None, unpack_dir=None, opts={'subset': 'all', 'remove': "('headers', 'footers', 'quotes')"}):
'\n Process 20 newsgroups into (data, target, metadata) format.\n\n\n Parameters\n ----------\n unpack_dir: path\n The interim parent directory the dataset files have been unpacked into.\n extract_dir: str\n Name of the directory of the unpacked files relative to the unpack_dir. Note that\n opts: dict default {"subset":"all", "remove"="(\'headers\', \'footers\', \'quotes\')"}\n Options to pass to sklearn.datasets.fetch_20newsgroups.\n\n\n Returns\n -------\n A tuple:\n (data, target, additional_metadata)\n\n '
if (metadata is None):
metadata = {}
if (unpack_dir is None):
unpack_dir = paths['interim_data_path']
else:
unpack_dir = pathlib.Path(unpack_dir)
data_dir = (unpack_dir / f'{extract_dir}')
news = fetch_20newsgroups(**opts)
metadata['target_names'] = news.target_names
return (news.data, news.target, metadata) | 8,225,099,787,755,758,000 | Process 20 newsgroups into (data, target, metadata) format.
Parameters
----------
unpack_dir: path
The interim parent directory the dataset files have been unpacked into.
extract_dir: str
Name of the directory of the unpacked files relative to the unpack_dir. Note that
opts: dict default {"subset":"all", "remove"="('headers', 'footers', 'quotes')"}
Options to pass to sklearn.datasets.fetch_20newsgroups.
Returns
-------
A tuple:
(data, target, additional_metadata) | src/data/process_functions.py | process_20_newsgroups | acwooding/docmap_playground | python | def process_20_newsgroups(*, extract_dir='20_newsgroups', metadata=None, unpack_dir=None, opts={'subset': 'all', 'remove': "('headers', 'footers', 'quotes')"}):
'\n Process 20 newsgroups into (data, target, metadata) format.\n\n\n Parameters\n ----------\n unpack_dir: path\n The interim parent directory the dataset files have been unpacked into.\n extract_dir: str\n Name of the directory of the unpacked files relative to the unpack_dir. Note that\n opts: dict default {"subset":"all", "remove"="(\'headers\', \'footers\', \'quotes\')"}\n Options to pass to sklearn.datasets.fetch_20newsgroups.\n\n\n Returns\n -------\n A tuple:\n (data, target, additional_metadata)\n\n '
if (metadata is None):
metadata = {}
if (unpack_dir is None):
unpack_dir = paths['interim_data_path']
else:
unpack_dir = pathlib.Path(unpack_dir)
data_dir = (unpack_dir / f'{extract_dir}')
news = fetch_20newsgroups(**opts)
metadata['target_names'] = news.target_names
return (news.data, news.target, metadata) |
def main(items=None, printmd=None, printcal=False, found=False, save=None, download=None, requestor_pays=False, **kwargs):
' Main function for performing a search '
if (items is None):
search = Search.search(**kwargs)
if found:
num = search.found()
print(('%s items found' % num))
return num
items = search.items()
else:
items = Items.load(items)
print(('%s items found' % len(items)))
if (printmd is not None):
print(items.summary(printmd))
if printcal:
print(items.calendar())
if (save is not None):
items.save(filename=save)
if (download is not None):
if ('ALL' in download):
download = set([k for i in items for k in i.assets])
for key in download:
items.download(key=key, path=config.DATADIR, filename=config.FILENAME, requestor_pays=requestor_pays)
return items | -3,783,134,709,165,279,000 | Main function for performing a search | satsearch/main.py | main | lishrimp/sat-search | python | def main(items=None, printmd=None, printcal=False, found=False, save=None, download=None, requestor_pays=False, **kwargs):
' '
if (items is None):
search = Search.search(**kwargs)
if found:
num = search.found()
print(('%s items found' % num))
return num
items = search.items()
else:
items = Items.load(items)
print(('%s items found' % len(items)))
if (printmd is not None):
print(items.summary(printmd))
if printcal:
print(items.calendar())
if (save is not None):
items.save(filename=save)
if (download is not None):
if ('ALL' in download):
download = set([k for i in items for k in i.assets])
for key in download:
items.download(key=key, path=config.DATADIR, filename=config.FILENAME, requestor_pays=requestor_pays)
return items |
def _nose_tools_functions():
'Get an iterator of names and bound methods.'
module = _BUILDER.string_build(textwrap.dedent('\n import unittest\n\n class Test(unittest.TestCase):\n pass\n a = Test()\n '))
try:
case = next(module['a'].infer())
except astroid.InferenceError:
return
for method in case.methods():
if (method.name.startswith('assert') and ('_' not in method.name)):
pep8_name = _pep8(method.name)
(yield (pep8_name, astroid.BoundMethod(method, case)))
if (method.name == 'assertEqual'):
(yield ('assert_equals', astroid.BoundMethod(method, case))) | -155,066,971,101,152,830 | Get an iterator of names and bound methods. | venv/Lib/site-packages/astroid/brain/brain_nose.py | _nose_tools_functions | Nucl3arSn3k/randomplushmiku | python | def _nose_tools_functions():
module = _BUILDER.string_build(textwrap.dedent('\n import unittest\n\n class Test(unittest.TestCase):\n pass\n a = Test()\n '))
try:
case = next(module['a'].infer())
except astroid.InferenceError:
return
for method in case.methods():
if (method.name.startswith('assert') and ('_' not in method.name)):
pep8_name = _pep8(method.name)
(yield (pep8_name, astroid.BoundMethod(method, case)))
if (method.name == 'assertEqual'):
(yield ('assert_equals', astroid.BoundMethod(method, case))) |
def _nose_tools_trivial_transform():
'Custom transform for the nose.tools module.'
stub = _BUILDER.string_build('__all__ = []')
all_entries = ['ok_', 'eq_']
for (pep8_name, method) in _nose_tools_functions():
all_entries.append(pep8_name)
stub[pep8_name] = method
all_assign = stub['__all__'].parent
all_object = astroid.List(all_entries)
all_object.parent = all_assign
all_assign.value = all_object
return stub | 4,951,586,181,410,846,000 | Custom transform for the nose.tools module. | venv/Lib/site-packages/astroid/brain/brain_nose.py | _nose_tools_trivial_transform | Nucl3arSn3k/randomplushmiku | python | def _nose_tools_trivial_transform():
stub = _BUILDER.string_build('__all__ = []')
all_entries = ['ok_', 'eq_']
for (pep8_name, method) in _nose_tools_functions():
all_entries.append(pep8_name)
stub[pep8_name] = method
all_assign = stub['__all__'].parent
all_object = astroid.List(all_entries)
all_object.parent = all_assign
all_assign.value = all_object
return stub |
def _flatten_args(pairs_in, args_out, prefix, visited_stack):
'Helper function for flatten_args. See `flatten_args` below for details.'
for (key, v) in pairs_in:
if (not isinstance(key, str)):
raise ValueError(('Keys must be strings. %r' % key))
flat_key = (((prefix + '.') + key) if prefix else key)
if (v is None):
args_out[flat_key] = 'none'
elif isinstance(v, str):
args_out[flat_key] = v
elif isinstance(v, bool):
args_out[flat_key] = ('true' if v else 'false')
elif isinstance(v, numbers.Number):
args_out[flat_key] = str(v)
elif isinstance(v, Mapping):
if (not any(((v is entry) for entry in visited_stack))):
_flatten_args(v.items(), args_out, flat_key, (visited_stack + [v]))
elif isinstance(v, Sequence):
if (not any(((v is entry) for entry in visited_stack))):
_flatten_args(((str((i + 1)), vv) for (i, vv) in enumerate(v)), args_out, flat_key, (visited_stack + [v]))
else:
raise ValueError("Value for '{}' cannot be type: '{}'".format(flat_key, str(type(v)))) | -496,815,897,776,520,260 | Helper function for flatten_args. See `flatten_args` below for details. | dmlab2d/settings_helper.py | _flatten_args | LaudateCorpus1/lab2d | python | def _flatten_args(pairs_in, args_out, prefix, visited_stack):
for (key, v) in pairs_in:
if (not isinstance(key, str)):
raise ValueError(('Keys must be strings. %r' % key))
flat_key = (((prefix + '.') + key) if prefix else key)
if (v is None):
args_out[flat_key] = 'none'
elif isinstance(v, str):
args_out[flat_key] = v
elif isinstance(v, bool):
args_out[flat_key] = ('true' if v else 'false')
elif isinstance(v, numbers.Number):
args_out[flat_key] = str(v)
elif isinstance(v, Mapping):
if (not any(((v is entry) for entry in visited_stack))):
_flatten_args(v.items(), args_out, flat_key, (visited_stack + [v]))
elif isinstance(v, Sequence):
if (not any(((v is entry) for entry in visited_stack))):
_flatten_args(((str((i + 1)), vv) for (i, vv) in enumerate(v)), args_out, flat_key, (visited_stack + [v]))
else:
raise ValueError("Value for '{}' cannot be type: '{}'".format(flat_key, str(type(v)))) |
def flatten_args(args_in):
"Converts a dictionary of dictionarys and lists into a flat table.\n\n Args:\n args_in: dictionary containing a hierachy of dictionaries and lists. Leaf\n values can be strings, bools, numbers..\n\n Returns:\n A flat dictionary with keys separated by '.' and string values.\n "
args_out = {}
_flatten_args(args_in.items(), args_out, None, [args_in])
return args_out | -401,289,397,659,758,140 | Converts a dictionary of dictionaries and lists into a flat table.
Args:
args_in: dictionary containing a hierarchy of dictionaries and lists. Leaf
values can be strings, bools, numbers.
Returns:
A flat dictionary with keys separated by '.' and string values. | dmlab2d/settings_helper.py | flatten_args | LaudateCorpus1/lab2d | python | def flatten_args(args_in):
"Converts a dictionary of dictionarys and lists into a flat table.\n\n Args:\n args_in: dictionary containing a hierachy of dictionaries and lists. Leaf\n values can be strings, bools, numbers..\n\n Returns:\n A flat dictionary with keys separated by '.' and string values.\n "
args_out = {}
_flatten_args(args_in.items(), args_out, None, [args_in])
return args_out |
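A usage sketch (assuming `flatten_args` and `_flatten_args` above are in scope): keys are joined with '.', list indices become 1-based string keys, and scalars are stringified.

```python
nested = {"a": {"b": 1, "c": [True, None]}, "d": "x"}
print(flatten_args(nested))
# {'a.b': '1', 'a.c.1': 'true', 'a.c.2': 'none', 'd': 'x'}
```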
def ReadTxtNet(file_path='', undirected=True):
' Read the txt network file. \n Notations: The network is unweighted.\n\n Parameters\n ----------\n file_path str : path of network file\n undirected bool : whether the edges are undirected\n\n Return\n ------\n net dict : a dict recording the connections in the graph\n node2id dict : a dict mapping the nodes to their embedding indices \n id2node dict : a dict mapping nodes embedding indices to the nodes\n '
if ((file_path == 'youtube') or (file_path == 'blog')):
name = file_path
dir = get_download_dir()
zip_file_path = '{}/{}.zip'.format(dir, name)
download(_get_dgl_url(os.path.join('dataset/DeepWalk/', '{}.zip'.format(file_path))), path=zip_file_path)
extract_archive(zip_file_path, '{}/{}'.format(dir, name))
file_path = '{}/{}/{}-net.txt'.format(dir, name, name)
node2id = {}
id2node = {}
cid = 0
src = []
dst = []
weight = []
net = {}
with open(file_path, 'r') as f:
for line in f.readlines():
tup = list(map(int, line.strip().split(' ')))
assert (len(tup) in [2, 3]), 'The format of network file is unrecognizable.'
if (len(tup) == 3):
(n1, n2, w) = tup
elif (len(tup) == 2):
(n1, n2) = tup
w = 1
if (n1 not in node2id):
node2id[n1] = cid
id2node[cid] = n1
cid += 1
if (n2 not in node2id):
node2id[n2] = cid
id2node[cid] = n2
cid += 1
n1 = node2id[n1]
n2 = node2id[n2]
if (n1 not in net):
net[n1] = {n2: w}
src.append(n1)
dst.append(n2)
weight.append(w)
elif (n2 not in net[n1]):
net[n1][n2] = w
src.append(n1)
dst.append(n2)
weight.append(w)
if undirected:
if (n2 not in net):
net[n2] = {n1: w}
src.append(n2)
dst.append(n1)
weight.append(w)
elif (n1 not in net[n2]):
net[n2][n1] = w
src.append(n2)
dst.append(n1)
weight.append(w)
print(('node num: %d' % len(net)))
print(('edge num: %d' % len(src)))
assert (max(net.keys()) == (len(net) - 1)), 'error reading net, quit'
sm = sp.coo_matrix((np.array(weight), (src, dst)), dtype=np.float32)
return (net, node2id, id2node, sm) | 3,508,495,473,879,411,700 | Read the txt network file.
Notations: The network is unweighted.
Parameters
----------
file_path str : path of network file
undirected bool : whether the edges are undirected
Return
------
net dict : a dict recording the connections in the graph
node2id dict : a dict mapping the nodes to their embedding indices
id2node dict : a dict mapping nodes embedding indices to the nodes | examples/pytorch/ogb/line/reading_data.py | ReadTxtNet | IzabelaMazur/dgl | python | def ReadTxtNet(file_path=, undirected=True):
' Read the txt network file. \n Notations: The network is unweighted.\n\n Parameters\n ----------\n file_path str : path of network file\n undirected bool : whether the edges are undirected\n\n Return\n ------\n net dict : a dict recording the connections in the graph\n node2id dict : a dict mapping the nodes to their embedding indices \n id2node dict : a dict mapping nodes embedding indices to the nodes\n '
if ((file_path == 'youtube') or (file_path == 'blog')):
name = file_path
dir = get_download_dir()
zip_file_path = '{}/{}.zip'.format(dir, name)
download(_get_dgl_url(os.path.join('dataset/DeepWalk/', '{}.zip'.format(file_path))), path=zip_file_path)
extract_archive(zip_file_path, '{}/{}'.format(dir, name))
file_path = '{}/{}/{}-net.txt'.format(dir, name, name)
node2id = {}
id2node = {}
cid = 0
src = []
dst = []
weight = []
net = {}
with open(file_path, 'r') as f:
for line in f.readlines():
tup = list(map(int, line.strip().split(' ')))
assert (len(tup) in [2, 3]), 'The format of network file is unrecognizable.'
if (len(tup) == 3):
(n1, n2, w) = tup
elif (len(tup) == 2):
(n1, n2) = tup
w = 1
if (n1 not in node2id):
node2id[n1] = cid
id2node[cid] = n1
cid += 1
if (n2 not in node2id):
node2id[n2] = cid
id2node[cid] = n2
cid += 1
n1 = node2id[n1]
n2 = node2id[n2]
if (n1 not in net):
net[n1] = {n2: w}
src.append(n1)
dst.append(n2)
weight.append(w)
elif (n2 not in net[n1]):
net[n1][n2] = w
src.append(n1)
dst.append(n2)
weight.append(w)
if undirected:
if (n2 not in net):
net[n2] = {n1: w}
src.append(n2)
dst.append(n1)
weight.append(w)
elif (n1 not in net[n2]):
net[n2][n1] = w
src.append(n2)
dst.append(n1)
weight.append(w)
print(('node num: %d' % len(net)))
print(('edge num: %d' % len(src)))
assert (max(net.keys()) == (len(net) - 1)), 'error reading net, quit'
sm = sp.coo_matrix((np.array(weight), (src, dst)), dtype=np.float32)
return (net, node2id, id2node, sm) |
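A hedged usage sketch for the plain-file path (assumes `ReadTxtNet` and its imports, `numpy as np` and `scipy.sparse as sp`, are in scope; each line of the file is `src dst` with an optional integer weight):

```python
with open("toy-net.txt", "w") as f:
    f.write("0 1 2\n1 2\n")  # one weighted edge (0,1,w=2) and one default-weight edge (1,2)

net, node2id, id2node, sm = ReadTxtNet("toy-net.txt", undirected=True)
print(len(net), sm.nnz)      # 3 nodes, 4 stored entries (each undirected edge twice)
```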
def net2graph(net_sm):
' Transform the network to DGL graph\n\n Return \n ------\n G DGLGraph : graph by DGL\n '
start = time.time()
G = dgl.DGLGraph(net_sm)
end = time.time()
t = (end - start)
print(('Building DGLGraph in %.2fs' % t))
return G | 5,918,307,427,968,118,000 | Transform the network to DGL graph
Return
------
G DGLGraph : graph by DGL | examples/pytorch/ogb/line/reading_data.py | net2graph | IzabelaMazur/dgl | python | def net2graph(net_sm):
' Transform the network to DGL graph\n\n Return \n ------\n G DGLGraph : graph by DGL\n '
start = time.time()
G = dgl.DGLGraph(net_sm)
end = time.time()
t = (end - start)
print(('Building DGLGraph in %.2fs' % t))
return G |
def __init__(self, net_file, batch_size, num_samples, negative=5, gpus=[0], fast_neg=True, ogbl_name='', load_from_ogbl=False, ogbn_name='', load_from_ogbn=False):
" This class has the following functions:\n 1. Transform the txt network file into DGL graph;\n 2. Generate random walk sequences for the trainer;\n 3. Provide the negative table if the user hopes to sample negative\n nodes according to nodes' degrees;\n\n Parameter\n ---------\n net_file str : path of the dgl network file\n walk_length int : number of nodes in a sequence\n window_size int : context window size\n num_walks int : number of walks for each node\n batch_size int : number of node sequences in each batch\n negative int : negative samples for each positve node pair\n fast_neg bool : whether do negative sampling inside a batch\n "
self.batch_size = batch_size
self.negative = negative
self.num_samples = num_samples
self.num_procs = len(gpus)
self.fast_neg = fast_neg
if load_from_ogbl:
assert (len(gpus) == 1), 'ogb.linkproppred is not compatible with multi-gpu training.'
from load_dataset import load_from_ogbl_with_name
self.G = load_from_ogbl_with_name(ogbl_name)
elif load_from_ogbn:
assert (len(gpus) == 1), 'ogb.linkproppred is not compatible with multi-gpu training.'
from load_dataset import load_from_ogbn_with_name
self.G = load_from_ogbn_with_name(ogbn_name)
else:
self.G = dgl.load_graphs(net_file)[0][0]
self.G = make_undirected(self.G)
print('Finish reading graph')
self.num_nodes = self.G.number_of_nodes()
start = time.time()
seeds = np.random.choice(np.arange(self.G.number_of_edges()), self.num_samples, replace=True)
self.seeds = torch.split(torch.LongTensor(seeds), int(np.ceil((self.num_samples / self.num_procs))), 0)
end = time.time()
t = (end - start)
print(('generate %d samples in %.2fs' % (len(seeds), t)))
self.valid_nodes = find_connected_nodes(self.G)
if (not fast_neg):
node_degree = self.G.out_degrees(self.valid_nodes).numpy()
node_degree = np.power(node_degree, 0.75)
node_degree /= np.sum(node_degree)
node_degree = np.array((node_degree * 100000000.0), dtype=np.int)
self.neg_table = []
for (idx, node) in enumerate(self.valid_nodes):
self.neg_table += ([node] * node_degree[idx])
self.neg_table_size = len(self.neg_table)
self.neg_table = np.array(self.neg_table, dtype=np.long)
del node_degree | 6,236,040,157,673,685,000 | This class has the following functions:
1. Transform the txt network file into DGL graph;
2. Generate random walk sequences for the trainer;
3. Provide the negative table if the user hopes to sample negative
nodes according to nodes' degrees;
Parameter
---------
net_file str : path of the dgl network file
walk_length int : number of nodes in a sequence
window_size int : context window size
num_walks int : number of walks for each node
batch_size int : number of node sequences in each batch
negative int : negative samples for each positive node pair
fast_neg bool : whether to do negative sampling inside a batch | examples/pytorch/ogb/line/reading_data.py | __init__ | IzabelaMazur/dgl | python | def __init__(self, net_file, batch_size, num_samples, negative=5, gpus=[0], fast_neg=True, ogbl_name='', load_from_ogbl=False, ogbn_name='', load_from_ogbn=False):
" This class has the following functions:\n        1. Transform the txt network file into DGL graph;\n        2. Generate random walk sequences for the trainer;\n        3. Provide the negative table if the user hopes to sample negative\n        nodes according to nodes' degrees;\n\n        Parameter\n        ---------\n        net_file str : path of the dgl network file\n        walk_length int : number of nodes in a sequence\n        window_size int : context window size\n        num_walks int : number of walks for each node\n        batch_size int : number of node sequences in each batch\n        negative int : negative samples for each positive node pair\n        fast_neg bool : whether to do negative sampling inside a batch\n        "
self.batch_size = batch_size
self.negative = negative
self.num_samples = num_samples
self.num_procs = len(gpus)
self.fast_neg = fast_neg
if load_from_ogbl:
assert (len(gpus) == 1), 'ogb.linkproppred is not compatible with multi-gpu training.'
from load_dataset import load_from_ogbl_with_name
self.G = load_from_ogbl_with_name(ogbl_name)
elif load_from_ogbn:
assert (len(gpus) == 1), 'ogb.linkproppred is not compatible with multi-gpu training.'
from load_dataset import load_from_ogbn_with_name
self.G = load_from_ogbn_with_name(ogbn_name)
else:
self.G = dgl.load_graphs(net_file)[0][0]
self.G = make_undirected(self.G)
print('Finish reading graph')
self.num_nodes = self.G.number_of_nodes()
start = time.time()
seeds = np.random.choice(np.arange(self.G.number_of_edges()), self.num_samples, replace=True)
self.seeds = torch.split(torch.LongTensor(seeds), int(np.ceil((self.num_samples / self.num_procs))), 0)
end = time.time()
t = (end - start)
print(('generate %d samples in %.2fs' % (len(seeds), t)))
self.valid_nodes = find_connected_nodes(self.G)
if (not fast_neg):
node_degree = self.G.out_degrees(self.valid_nodes).numpy()
node_degree = np.power(node_degree, 0.75)
node_degree /= np.sum(node_degree)
node_degree = np.array((node_degree * 100000000.0), dtype=np.int)
self.neg_table = []
for (idx, node) in enumerate(self.valid_nodes):
self.neg_table += ([node] * node_degree[idx])
self.neg_table_size = len(self.neg_table)
self.neg_table = np.array(self.neg_table, dtype=np.long)
del node_degree |
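The negative table built in the `not fast_neg` branch implements unigram^0.75 sampling; a standalone numpy sketch of the same idea (with a smaller table size than the 1e8 used above):

```python
import numpy as np

degrees = np.array([10.0, 5.0, 1.0])       # out-degrees of three valid nodes
probs = degrees ** 0.75
probs /= probs.sum()
table_size = 1_000_000                      # 1e8 in the code above
counts = (probs * table_size).astype(int)
neg_table = np.repeat(np.arange(len(degrees)), counts)
# uniform draws from neg_table now pick node i with probability ~ deg(i)^0.75
draws = np.random.choice(neg_table, size=100_000)
print(np.bincount(draws) / len(draws), probs)
```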
def create_sampler(self, i):
' create random walk sampler '
return EdgeSampler(self.G, self.seeds[i]) | 9,179,441,167,527,142,000 | create random walk sampler | examples/pytorch/ogb/line/reading_data.py | create_sampler | IzabelaMazur/dgl | python | def create_sampler(self, i):
' '
return EdgeSampler(self.G, self.seeds[i]) |
def sample(self, seeds):
' seeds torch.LongTensor : a batch of indices of edges '
return self.edges[torch.LongTensor(seeds)] | -4,032,532,657,934,077,000 | seeds torch.LongTensor : a batch of indices of edges | examples/pytorch/ogb/line/reading_data.py | sample | IzabelaMazur/dgl | python | def sample(self, seeds):
' '
return self.edges[torch.LongTensor(seeds)] |
def mock_connection(aioclient_mock: AiohttpClientMocker) -> None:
'Mock the DirecTV connection for Home Assistant.'
aioclient_mock.get(f'http://{HOST}:8080/info/getVersion', text=load_fixture('directv/info-get-version.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/info/getLocations', text=load_fixture('directv/info-get-locations.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/info/mode', params={'clientAddr': 'B01234567890'}, text=load_fixture('directv/info-mode-standby.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/info/mode', params={'clientAddr': '9XXXXXXXXXX9'}, status=HTTPStatus.INTERNAL_SERVER_ERROR, text=load_fixture('directv/info-mode-error.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/info/mode', text=load_fixture('directv/info-mode.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/remote/processKey', text=load_fixture('directv/remote-process-key.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/tune', text=load_fixture('directv/tv-tune.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/getTuned', params={'clientAddr': '2CA17D1CD30X'}, text=load_fixture('directv/tv-get-tuned.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/getTuned', params={'clientAddr': 'A01234567890'}, text=load_fixture('directv/tv-get-tuned-music.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/getTuned', params={'clientAddr': 'C01234567890'}, status=HTTPStatus.FORBIDDEN, text=load_fixture('directv/tv-get-tuned-restricted.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/getTuned', text=load_fixture('directv/tv-get-tuned-movie.json'), headers={'Content-Type': CONTENT_TYPE_JSON}) | -5,259,314,499,135,104,000 | Mock the DirecTV connection for Home Assistant. | tests/components/directv/__init__.py | mock_connection | 2Fake/core | python | def mock_connection(aioclient_mock: AiohttpClientMocker) -> None:
aioclient_mock.get(f'http://{HOST}:8080/info/getVersion', text=load_fixture('directv/info-get-version.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/info/getLocations', text=load_fixture('directv/info-get-locations.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/info/mode', params={'clientAddr': 'B01234567890'}, text=load_fixture('directv/info-mode-standby.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/info/mode', params={'clientAddr': '9XXXXXXXXXX9'}, status=HTTPStatus.INTERNAL_SERVER_ERROR, text=load_fixture('directv/info-mode-error.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/info/mode', text=load_fixture('directv/info-mode.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/remote/processKey', text=load_fixture('directv/remote-process-key.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/tune', text=load_fixture('directv/tv-tune.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/getTuned', params={'clientAddr': '2CA17D1CD30X'}, text=load_fixture('directv/tv-get-tuned.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/getTuned', params={'clientAddr': 'A01234567890'}, text=load_fixture('directv/tv-get-tuned-music.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/getTuned', params={'clientAddr': 'C01234567890'}, status=HTTPStatus.FORBIDDEN, text=load_fixture('directv/tv-get-tuned-restricted.json'), headers={'Content-Type': CONTENT_TYPE_JSON})
aioclient_mock.get(f'http://{HOST}:8080/tv/getTuned', text=load_fixture('directv/tv-get-tuned-movie.json'), headers={'Content-Type': CONTENT_TYPE_JSON}) |
async def setup_integration(hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, skip_entry_setup: bool=False, setup_error: bool=False) -> MockConfigEntry:
'Set up the DirecTV integration in Home Assistant.'
if setup_error:
aioclient_mock.get(f'http://{HOST}:8080/info/getVersion', status=HTTPStatus.INTERNAL_SERVER_ERROR)
else:
mock_connection(aioclient_mock)
entry = MockConfigEntry(domain=DOMAIN, unique_id=RECEIVER_ID, data={CONF_HOST: HOST, CONF_RECEIVER_ID: RECEIVER_ID})
entry.add_to_hass(hass)
if (not skip_entry_setup):
(await hass.config_entries.async_setup(entry.entry_id))
(await hass.async_block_till_done())
return entry | -8,370,659,672,752,647,000 | Set up the DirecTV integration in Home Assistant. | tests/components/directv/__init__.py | setup_integration | 2Fake/core | python | async def setup_integration(hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, skip_entry_setup: bool=False, setup_error: bool=False) -> MockConfigEntry:
if setup_error:
aioclient_mock.get(f'http://{HOST}:8080/info/getVersion', status=HTTPStatus.INTERNAL_SERVER_ERROR)
else:
mock_connection(aioclient_mock)
entry = MockConfigEntry(domain=DOMAIN, unique_id=RECEIVER_ID, data={CONF_HOST: HOST, CONF_RECEIVER_ID: RECEIVER_ID})
entry.add_to_hass(hass)
if (not skip_entry_setup):
(await hass.config_entries.async_setup(entry.entry_id))
(await hass.async_block_till_done())
return entry |
def __init__(self, **kwargs):
'\n Convolutional model\n :param kwargs:\n window_size: int\n stride_size: int\n test_percentage: float\n n_features: int\n n_outputs: int\n '
self.window_size = kwargs['window_size']
self.stride_size = kwargs['stride_size']
self.test_percentage = kwargs['test_percentage']
self.verbose = 0
self.epochs = 10
self.batch_size = 32
self.model = self.__create_model(kwargs['n_features'], kwargs['n_outputs']) | 434,908,339,896,038,900 | Convolutional model
:param kwargs:
window_size: int
stride_size: int
test_percentage: float
n_features: int
n_outputs: int | archive/model_archive/ConvModel.py | __init__ | Sensors-in-Paradise/OpportunityML | python | def __init__(self, **kwargs):
'\n Convolutional model\n :param kwargs:\n window_size: int\n stride_size: int\n test_percentage: float\n n_features: int\n n_outputs: int\n '
self.window_size = kwargs['window_size']
self.stride_size = kwargs['stride_size']
self.test_percentage = kwargs['test_percentage']
self.verbose = 0
self.epochs = 10
self.batch_size = 32
self.model = self.__create_model(kwargs['n_features'], kwargs['n_outputs']) |
def _get(self, *args, **kwargs):
"\n Retrieves a list of messages from the request's session. This storage\n always stores everything it is given, so return True for the\n all_retrieved flag.\n "
return (self.deserialize_messages(self.request.session.get(self.session_key)), True) | 5,995,305,131,204,208,000 | Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag. | django/contrib/messages/storage/session.py | _get | Acidburn0zzz/django | python | def _get(self, *args, **kwargs):
"\n Retrieves a list of messages from the request's session. This storage\n always stores everything it is given, so return True for the\n all_retrieved flag.\n "
return (self.deserialize_messages(self.request.session.get(self.session_key)), True) |
def _store(self, messages, response, *args, **kwargs):
"\n Stores a list of messages to the request's session.\n "
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return [] | -7,376,848,117,602,780,000 | Stores a list of messages to the request's session. | django/contrib/messages/storage/session.py | _store | Acidburn0zzz/django | python | def _store(self, messages, response, *args, **kwargs):
"\n \n "
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return [] |
def rad2deg(tensor: torch.Tensor) -> torch.Tensor:
'Function that converts angles from radians to degrees.\n\n Args:\n tensor (torch.Tensor): Tensor of arbitrary shape.\n\n Returns:\n torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3)\n >>> output = rad2deg(input)\n '
if (not isinstance(tensor, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(tensor)))
return ((180.0 * tensor) / pi.to(tensor.device).type(tensor.dtype)) | -1,196,111,188,359,121,200 | Function that converts angles from radians to degrees.
Args:
tensor (torch.Tensor): Tensor of arbitrary shape.
Returns:
torch.Tensor: Tensor with same shape as input.
Example:
>>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3)
>>> output = rad2deg(input) | kornia/geometry/conversions.py | rad2deg | anthonytec2/kornia | python | def rad2deg(tensor: torch.Tensor) -> torch.Tensor:
'Function that converts angles from radians to degrees.\n\n Args:\n tensor (torch.Tensor): Tensor of arbitrary shape.\n\n Returns:\n torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3)\n >>> output = rad2deg(input)\n '
if (not isinstance(tensor, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(tensor)))
return ((180.0 * tensor) / pi.to(tensor.device).type(tensor.dtype)) |
def deg2rad(tensor: torch.Tensor) -> torch.Tensor:
'Function that converts angles from degrees to radians.\n\n Args:\n tensor (torch.Tensor): Tensor of arbitrary shape.\n\n Returns:\n torch.Tensor: tensor with same shape as input.\n\n Examples::\n\n >>> input = 360. * torch.rand(1, 3, 3)\n >>> output = deg2rad(input)\n '
if (not isinstance(tensor, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(tensor)))
return ((tensor * pi.to(tensor.device).type(tensor.dtype)) / 180.0) | -2,303,698,553,219,946,800 | Function that converts angles from degrees to radians.
Args:
tensor (torch.Tensor): Tensor of arbitrary shape.
Returns:
torch.Tensor: tensor with same shape as input.
Examples::
>>> input = 360. * torch.rand(1, 3, 3)
>>> output = deg2rad(input) | kornia/geometry/conversions.py | deg2rad | anthonytec2/kornia | python | def deg2rad(tensor: torch.Tensor) -> torch.Tensor:
'Function that converts angles from degrees to radians.\n\n Args:\n tensor (torch.Tensor): Tensor of arbitrary shape.\n\n Returns:\n torch.Tensor: tensor with same shape as input.\n\n Examples::\n\n >>> input = 360. * torch.rand(1, 3, 3)\n >>> output = deg2rad(input)\n '
if (not isinstance(tensor, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(tensor)))
return ((tensor * pi.to(tensor.device).type(tensor.dtype)) / 180.0) |
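A standalone round-trip check of the two conversions above (using `math.pi` rather than kornia's `pi` tensor constant):

```python
import math
import torch

x_deg = 360.0 * torch.rand(2, 3)
x_rad = x_deg * math.pi / 180.0   # deg2rad
back = 180.0 * x_rad / math.pi    # rad2deg
assert torch.allclose(back, x_deg)
```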
def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
'Function that converts polar coordinates to cartesian coordinates.\n\n Args:\n rho (torch.Tensor): Tensor of arbitrary shape.\n phi (torch.Tensor): Tensor of same arbitrary shape.\n\n Returns:\n torch.Tensor, torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> rho = torch.rand(1, 3, 3)\n >>> phi = torch.rand(1, 3, 3)\n >>> x, y = pol2cart(rho, phi)\n '
if (not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor))):
raise TypeError('Input type is not a torch.Tensor. Got {}, {}'.format(type(rho), type(phi)))
x = (rho * torch.cos(phi))
y = (rho * torch.sin(phi))
return (x, y) | -7,582,725,315,099,155,000 | Function that converts polar coordinates to cartesian coordinates.
Args:
rho (torch.Tensor): Tensor of arbitrary shape.
phi (torch.Tensor): Tensor of same arbitrary shape.
Returns:
torch.Tensor, torch.Tensor: Tensor with same shape as input.
Example:
>>> rho = torch.rand(1, 3, 3)
>>> phi = torch.rand(1, 3, 3)
>>> x, y = pol2cart(rho, phi) | kornia/geometry/conversions.py | pol2cart | anthonytec2/kornia | python | def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
'Function that converts polar coordinates to cartesian coordinates.\n\n Args:\n rho (torch.Tensor): Tensor of arbitrary shape.\n phi (torch.Tensor): Tensor of same arbitrary shape.\n\n Returns:\n torch.Tensor, torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> rho = torch.rand(1, 3, 3)\n >>> phi = torch.rand(1, 3, 3)\n >>> x, y = pol2cart(rho, phi)\n '
if (not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor))):
raise TypeError('Input type is not a torch.Tensor. Got {}, {}'.format(type(rho), type(phi)))
x = (rho * torch.cos(phi))
y = (rho * torch.sin(phi))
return (x, y) |
def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float=1e-08) -> Tuple[(torch.Tensor, torch.Tensor)]:
'Function that converts cartesian coordinates to polar coordinates.\n\n Args:\n x (torch.Tensor): Tensor of arbitrary shape.\n y (torch.Tensor): Tensor of same arbitrary shape.\n eps (float): To avoid division by zero. Default is 1e-8\n\n Returns:\n torch.Tensor, torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> x = torch.rand(1, 3, 3)\n >>> y = torch.rand(1, 3, 3)\n >>> rho, phi = cart2pol(x, y)\n '
if (not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor))):
raise TypeError('Input type is not a torch.Tensor. Got {}, {}'.format(type(x), type(y)))
rho = torch.sqrt((((x ** 2) + (y ** 2)) + eps))
phi = torch.atan2(y, x)
return (rho, phi) | 6,833,031,384,997,357,000 | Function that converts cartesian coordinates to polar coordinates.
Args:
x (torch.Tensor): Tensor of arbitrary shape.
y (torch.Tensor): Tensor of same arbitrary shape.
eps (float): To avoid division by zero. Default is 1e-8
Returns:
torch.Tensor, torch.Tensor: Tensor with same shape as input.
Example:
>>> x = torch.rand(1, 3, 3)
>>> y = torch.rand(1, 3, 3)
>>> rho, phi = cart2pol(x, y) | kornia/geometry/conversions.py | cart2pol | anthonytec2/kornia | python | def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float=1e-08) -> Tuple[(torch.Tensor, torch.Tensor)]:
'Function that converts cartesian coordinates to polar coordinates.\n\n Args:\n x (torch.Tensor): Tensor of arbitrary shape.\n y (torch.Tensor): Tensor of same arbitrary shape.\n eps (float): To avoid division by zero. Default is 1e-8\n\n Returns:\n torch.Tensor, torch.Tensor: Tensor with same shape as input.\n\n Example:\n >>> x = torch.rand(1, 3, 3)\n >>> y = torch.rand(1, 3, 3)\n >>> rho, phi = cart2pol(x, y)\n '
if (not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor))):
raise TypeError('Input type is not a torch.Tensor. Got {}, {}'.format(type(x), type(y)))
rho = torch.sqrt((((x ** 2) + (y ** 2)) + eps))
phi = torch.atan2(y, x)
return (rho, phi) |
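The same kind of sketch for the polar pair: pol2cart followed by cart2pol recovers (rho, phi) whenever rho is strictly positive and phi lies in the principal range of atan2; the eps term inside the sqrt is why a small absolute tolerance is used.

import torch
from kornia.geometry.conversions import cart2pol, pol2cart

rho = torch.rand(1, 3, 3) + 0.1   # keep rho strictly positive
phi = torch.rand(1, 3, 3)         # values in [0, 1) sit inside (-pi, pi]
rho2, phi2 = cart2pol(*pol2cart(rho, phi))
assert torch.allclose(rho2, rho, atol=1e-3)
assert torch.allclose(phi2, phi, atol=1e-3)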
def convert_points_from_homogeneous(points: torch.Tensor, eps: float=1e-08) -> torch.Tensor:
'Function that converts points from homogeneous to Euclidean space.\n\n Examples::\n\n >>> input = torch.rand(2, 4, 3) # BxNx3\n >>> output = convert_points_from_homogeneous(input) # BxNx2\n '
if (not isinstance(points, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(points)))
if (len(points.shape) < 2):
raise ValueError('Input must be at least a 2D tensor. Got {}'.format(points.shape))
z_vec: torch.Tensor = points[..., (- 1):]
mask: torch.Tensor = (torch.abs(z_vec) > eps)
scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_(mask, (torch.tensor(1.0).to(points.device) / z_vec[mask]))
return (scale * points[..., :(- 1)]) | -4,069,164,611,214,838,300 | Function that converts points from homogeneous to Euclidean space.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = convert_points_from_homogeneous(input) # BxNx2 | kornia/geometry/conversions.py | convert_points_from_homogeneous | anthonytec2/kornia | python | def convert_points_from_homogeneous(points: torch.Tensor, eps: float=1e-08) -> torch.Tensor:
'Function that converts points from homogeneous to Euclidean space.\n\n Examples::\n\n >>> input = torch.rand(2, 4, 3) # BxNx3\n >>> output = convert_points_from_homogeneous(input) # BxNx2\n '
if (not isinstance(points, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(points)))
if (len(points.shape) < 2):
raise ValueError('Input must be at least a 2D tensor. Got {}'.format(points.shape))
z_vec: torch.Tensor = points[..., (- 1):]
mask: torch.Tensor = (torch.abs(z_vec) > eps)
scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_(mask, (torch.tensor(1.0).to(points.device) / z_vec[mask]))
return (scale * points[..., :(- 1)]) |
def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor:
'Function that converts points from Euclidean to homogeneous space.\n\n Examples::\n\n >>> input = torch.rand(2, 4, 3) # BxNx3\n >>> output = convert_points_to_homogeneous(input) # BxNx4\n '
if (not isinstance(points, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(points)))
if (len(points.shape) < 2):
raise ValueError('Input must be at least a 2D tensor. Got {}'.format(points.shape))
return torch.nn.functional.pad(points, [0, 1], 'constant', 1.0) | -5,162,432,132,527,074,000 | Function that converts points from Euclidean to homogeneous space.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = convert_points_to_homogeneous(input) # BxNx4 | kornia/geometry/conversions.py | convert_points_to_homogeneous | anthonytec2/kornia | python | def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor:
'Function that converts points from Euclidean to homogeneous space.\n\n Examples::\n\n >>> input = torch.rand(2, 4, 3) # BxNx3\n >>> output = convert_points_to_homogeneous(input) # BxNx4\n '
if (not isinstance(points, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(points)))
if (len(points.shape) < 2):
raise ValueError('Input must be at least a 2D tensor. Got {}'.format(points.shape))
return torch.nn.functional.pad(points, [0, 1], 'constant', 1.0) |
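A round trip through the two homogeneous-coordinate records: padding a trailing 1 and then dividing it back out is the identity for finite points. Sketch under the same import assumption as above.

import torch
from kornia.geometry.conversions import (
    convert_points_from_homogeneous, convert_points_to_homogeneous)

pts = torch.rand(2, 4, 3)                   # BxNx3 Euclidean points
pts_h = convert_points_to_homogeneous(pts)  # BxNx4 with a trailing 1
assert torch.allclose(convert_points_from_homogeneous(pts_h), pts, atol=1e-6)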
def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor:
'Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3].\n\n Examples::\n\n >>> input = torch.rand(2, 2, 3) # Bx2x3\n >>> output = convert_affinematrix_to_homography(input) # Bx3x3\n '
if (not isinstance(A, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(A)))
if (not ((len(A.shape) == 3) and (A.shape[(- 2):] == (2, 3)))):
raise ValueError('Input matrix must be a Bx2x3 tensor. Got {}'.format(A.shape))
return _convert_affinematrix_to_homography_impl(A) | -7,483,404,685,304,305,000 | Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3].
Examples::
>>> input = torch.rand(2, 2, 3) # Bx2x3
>>> output = convert_affinematrix_to_homography(input) # Bx3x3 | kornia/geometry/conversions.py | convert_affinematrix_to_homography | anthonytec2/kornia | python | def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor:
'Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3].\n\n Examples::\n\n >>> input = torch.rand(2, 2, 3) # Bx2x3\n >>> output = convert_affinematrix_to_homography(input) # Bx3x3\n '
if (not isinstance(A, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(A)))
if (not ((len(A.shape) == 3) and (A.shape[(- 2):] == (2, 3)))):
raise ValueError('Input matrix must be a Bx2x3 tensor. Got {}'.format(A.shape))
return _convert_affinematrix_to_homography_impl(A) |
def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor:
'Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4].\n\n Examples::\n\n >>> input = torch.rand(2, 3, 4) # Bx3x4\n >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4\n '
if (not isinstance(A, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(A)))
if (not ((len(A.shape) == 3) and (A.shape[(- 2):] == (3, 4)))):
raise ValueError('Input matrix must be a Bx3x4 tensor. Got {}'.format(A.shape))
return _convert_affinematrix_to_homography_impl(A) | 2,660,687,678,206,777,300 | Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4].
Examples::
>>> input = torch.rand(2, 3, 4) # Bx3x4
>>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 | kornia/geometry/conversions.py | convert_affinematrix_to_homography3d | anthonytec2/kornia | python | def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor:
'Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4].\n\n Examples::\n\n >>> input = torch.rand(2, 3, 4) # Bx3x4\n >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4\n '
if (not isinstance(A, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(A)))
if (not ((len(A.shape) == 3) and (A.shape[(- 2):] == (3, 4)))):
raise ValueError('Input matrix must be a Bx3x4 tensor. Got {}'.format(A.shape))
return _convert_affinematrix_to_homography_impl(A) |
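_convert_affinematrix_to_homography_impl is not included in this slice, but the two wrappers above imply that it appends the missing projective row (presumably [0, 0, 1] and [0, 0, 0, 1]); a shape-level sketch that relies only on what the records state:

import torch
from kornia.geometry.conversions import (
    convert_affinematrix_to_homography, convert_affinematrix_to_homography3d)

A2 = torch.rand(2, 2, 3)   # batch of 2x3 affine matrices
A3 = torch.rand(2, 3, 4)   # batch of 3x4 affine matrices
assert convert_affinematrix_to_homography(A2).shape == (2, 3, 3)
assert convert_affinematrix_to_homography3d(A3).shape == (2, 4, 4)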
def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:
'Convert 3d vector of axis-angle rotation to 3x3 rotation matrix\n\n Args:\n angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.\n\n Returns:\n torch.Tensor: tensor of 3x3 rotation matrices.\n\n Shape:\n - Input: :math:`(N, 3)`\n - Output: :math:`(N, 3, 3)`\n\n Example:\n >>> input = torch.rand(1, 3) # Nx3\n >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3\n '
if (not isinstance(angle_axis, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(angle_axis)))
if (not (angle_axis.shape[(- 1)] == 3)):
raise ValueError('Input size must be a (*, 3) tensor. Got {}'.format(angle_axis.shape))
def _compute_rotation_matrix(angle_axis, theta2, eps=1e-06):
k_one = 1.0
theta = torch.sqrt(theta2)
wxyz = (angle_axis / (theta + eps))
(wx, wy, wz) = torch.chunk(wxyz, 3, dim=1)
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
r00 = (cos_theta + ((wx * wx) * (k_one - cos_theta)))
r10 = ((wz * sin_theta) + ((wx * wy) * (k_one - cos_theta)))
r20 = (((- wy) * sin_theta) + ((wx * wz) * (k_one - cos_theta)))
r01 = (((wx * wy) * (k_one - cos_theta)) - (wz * sin_theta))
r11 = (cos_theta + ((wy * wy) * (k_one - cos_theta)))
r21 = ((wx * sin_theta) + ((wy * wz) * (k_one - cos_theta)))
r02 = ((wy * sin_theta) + ((wx * wz) * (k_one - cos_theta)))
r12 = (((- wx) * sin_theta) + ((wy * wz) * (k_one - cos_theta)))
r22 = (cos_theta + ((wz * wz) * (k_one - cos_theta)))
rotation_matrix = torch.cat([r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
return rotation_matrix.view((- 1), 3, 3)
def _compute_rotation_matrix_taylor(angle_axis):
(rx, ry, rz) = torch.chunk(angle_axis, 3, dim=1)
k_one = torch.ones_like(rx)
rotation_matrix = torch.cat([k_one, (- rz), ry, rz, k_one, (- rx), (- ry), rx, k_one], dim=1)
return rotation_matrix.view((- 1), 3, 3)
_angle_axis = torch.unsqueeze(angle_axis, dim=1)
theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
theta2 = torch.squeeze(theta2, dim=1)
rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)
eps = 1e-06
mask = (theta2 > eps).view((- 1), 1, 1).to(theta2.device)
mask_pos = mask.type_as(theta2)
mask_neg = (~mask).type_as(theta2)
batch_size = angle_axis.shape[0]
rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)
rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)
rotation_matrix[..., :3, :3] = ((mask_pos * rotation_matrix_normal) + (mask_neg * rotation_matrix_taylor))
return rotation_matrix | -3,174,089,505,320,541,000 | Convert 3d vector of axis-angle rotation to 3x3 rotation matrix
Args:
angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.
Returns:
torch.Tensor: tensor of 3x3 rotation matrices.
Shape:
- Input: :math:`(N, 3)`
- Output: :math:`(N, 3, 3)`
Example:
>>> input = torch.rand(1, 3) # Nx3
>>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 | kornia/geometry/conversions.py | angle_axis_to_rotation_matrix | anthonytec2/kornia | python | def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:
'Convert 3d vector of axis-angle rotation to 3x3 rotation matrix\n\n Args:\n angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.\n\n Returns:\n torch.Tensor: tensor of 3x3 rotation matrices.\n\n Shape:\n - Input: :math:`(N, 3)`\n - Output: :math:`(N, 3, 3)`\n\n Example:\n >>> input = torch.rand(1, 3) # Nx3\n >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3\n '
if (not isinstance(angle_axis, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(angle_axis)))
if (not (angle_axis.shape[(- 1)] == 3)):
raise ValueError('Input size must be a (*, 3) tensor. Got {}'.format(angle_axis.shape))
def _compute_rotation_matrix(angle_axis, theta2, eps=1e-06):
k_one = 1.0
theta = torch.sqrt(theta2)
wxyz = (angle_axis / (theta + eps))
(wx, wy, wz) = torch.chunk(wxyz, 3, dim=1)
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
r00 = (cos_theta + ((wx * wx) * (k_one - cos_theta)))
r10 = ((wz * sin_theta) + ((wx * wy) * (k_one - cos_theta)))
r20 = (((- wy) * sin_theta) + ((wx * wz) * (k_one - cos_theta)))
r01 = (((wx * wy) * (k_one - cos_theta)) - (wz * sin_theta))
r11 = (cos_theta + ((wy * wy) * (k_one - cos_theta)))
r21 = ((wx * sin_theta) + ((wy * wz) * (k_one - cos_theta)))
r02 = ((wy * sin_theta) + ((wx * wz) * (k_one - cos_theta)))
r12 = (((- wx) * sin_theta) + ((wy * wz) * (k_one - cos_theta)))
r22 = (cos_theta + ((wz * wz) * (k_one - cos_theta)))
rotation_matrix = torch.cat([r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
return rotation_matrix.view((- 1), 3, 3)
def _compute_rotation_matrix_taylor(angle_axis):
(rx, ry, rz) = torch.chunk(angle_axis, 3, dim=1)
k_one = torch.ones_like(rx)
rotation_matrix = torch.cat([k_one, (- rz), ry, rz, k_one, (- rx), (- ry), rx, k_one], dim=1)
return rotation_matrix.view((- 1), 3, 3)
_angle_axis = torch.unsqueeze(angle_axis, dim=1)
theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
theta2 = torch.squeeze(theta2, dim=1)
rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)
eps = 1e-06
mask = (theta2 > eps).view((- 1), 1, 1).to(theta2.device)
mask_pos = mask.type_as(theta2)
mask_neg = (~mask).type_as(theta2)
batch_size = angle_axis.shape[0]
rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)
rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)
rotation_matrix[..., :3, :3] = ((mask_pos * rotation_matrix_normal) + (mask_neg * rotation_matrix_taylor))
return rotation_matrix |
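For reference, the two inner helpers in this record are the two branches of Rodrigues' formula with unit axis \hat{k} = \mathbf{r} / \theta: the exact form when theta2 > eps, and its first-order expansion for near-zero angles. A LaTeX transcription (editorial, not from the repository):

R = I + \sin\theta\,[\hat{k}]_{\times} + (1 - \cos\theta)\,[\hat{k}]_{\times}^{2},
\qquad
R \approx I + [\mathbf{r}]_{\times} \quad (\theta \to 0)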
def rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor) -> torch.Tensor:
'Convert 3x3 rotation matrix to Rodrigues vector.\n\n Args:\n rotation_matrix (torch.Tensor): rotation matrix.\n\n Returns:\n torch.Tensor: Rodrigues vector transformation.\n\n Shape:\n - Input: :math:`(N, 3, 3)`\n - Output: :math:`(N, 3)`\n\n Example:\n >>> input = torch.rand(2, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_angle_axis(input) # Nx3\n '
if (not isinstance(rotation_matrix, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(rotation_matrix)))
if (not (rotation_matrix.shape[(- 2):] == (3, 3))):
raise ValueError('Input size must be a (*, 3, 3) tensor. Got {}'.format(rotation_matrix.shape))
quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix)
return quaternion_to_angle_axis(quaternion) | -4,264,213,605,656,858,000 | Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix (torch.Tensor): rotation matrix.
Returns:
torch.Tensor: Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3 | kornia/geometry/conversions.py | rotation_matrix_to_angle_axis | anthonytec2/kornia | python | def rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor) -> torch.Tensor:
'Convert 3x3 rotation matrix to Rodrigues vector.\n\n Args:\n rotation_matrix (torch.Tensor): rotation matrix.\n\n Returns:\n torch.Tensor: Rodrigues vector transformation.\n\n Shape:\n - Input: :math:`(N, 3, 3)`\n - Output: :math:`(N, 3)`\n\n Example:\n >>> input = torch.rand(2, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_angle_axis(input) # Nx3\n '
if (not isinstance(rotation_matrix, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(rotation_matrix)))
if (not (rotation_matrix.shape[(- 2):] == (3, 3))):
raise ValueError('Input size must be a (*, 3, 3) tensor. Got {}'.format(rotation_matrix.shape))
quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix)
return quaternion_to_angle_axis(quaternion) |
def rotation_matrix_to_quaternion(rotation_matrix: torch.Tensor, eps: float=1e-08) -> torch.Tensor:
'Convert 3x3 rotation matrix to 4d quaternion vector.\n The quaternion vector has components in (x, y, z, w) format.\n\n Args:\n rotation_matrix (torch.Tensor): the rotation matrix to convert.\n eps (float): small value to avoid zero division. Default: 1e-8.\n\n Return:\n torch.Tensor: the rotation in quaternion.\n\n Shape:\n - Input: :math:`(*, 3, 3)`\n - Output: :math:`(*, 4)`\n\n Example:\n >>> input = torch.rand(4, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_quaternion(input) # Nx4\n '
if (not isinstance(rotation_matrix, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(rotation_matrix)))
if (not (rotation_matrix.shape[(- 2):] == (3, 3))):
raise ValueError('Input size must be a (*, 3, 3) tensor. Got {}'.format(rotation_matrix.shape))
def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor:
eps: float = torch.finfo(numerator.dtype).tiny
return (numerator / torch.clamp(denominator, min=eps))
rotation_matrix_vec: torch.Tensor = rotation_matrix.view(*rotation_matrix.shape[:(- 2)], 9)
(m00, m01, m02, m10, m11, m12, m20, m21, m22) = torch.chunk(rotation_matrix_vec, chunks=9, dim=(- 1))
trace: torch.Tensor = ((m00 + m11) + m22)
def trace_positive_cond():
sq = (torch.sqrt((trace + 1.0)) * 2.0)
qw = (0.25 * sq)
qx = safe_zero_division((m21 - m12), sq)
qy = safe_zero_division((m02 - m20), sq)
qz = safe_zero_division((m10 - m01), sq)
return torch.cat([qx, qy, qz, qw], dim=(- 1))
def cond_1():
sq = (torch.sqrt(((((1.0 + m00) - m11) - m22) + eps)) * 2.0)
qw = safe_zero_division((m21 - m12), sq)
qx = (0.25 * sq)
qy = safe_zero_division((m01 + m10), sq)
qz = safe_zero_division((m02 + m20), sq)
return torch.cat([qx, qy, qz, qw], dim=(- 1))
def cond_2():
sq = (torch.sqrt(((((1.0 + m11) - m00) - m22) + eps)) * 2.0)
qw = safe_zero_division((m02 - m20), sq)
qx = safe_zero_division((m01 + m10), sq)
qy = (0.25 * sq)
qz = safe_zero_division((m12 + m21), sq)
return torch.cat([qx, qy, qz, qw], dim=(- 1))
def cond_3():
sq = (torch.sqrt(((((1.0 + m22) - m00) - m11) + eps)) * 2.0)
qw = safe_zero_division((m10 - m01), sq)
qx = safe_zero_division((m02 + m20), sq)
qy = safe_zero_division((m12 + m21), sq)
qz = (0.25 * sq)
return torch.cat([qx, qy, qz, qw], dim=(- 1))
where_2 = torch.where((m11 > m22), cond_2(), cond_3())
where_1 = torch.where(((m00 > m11) & (m00 > m22)), cond_1(), where_2)
quaternion: torch.Tensor = torch.where((trace > 0.0), trace_positive_cond(), where_1)
return quaternion | -6,200,754,844,404,515,000 | Convert 3x3 rotation matrix to 4d quaternion vector.
The quaternion vector has components in (x, y, z, w) format.
Args:
rotation_matrix (torch.Tensor): the rotation matrix to convert.
eps (float): small value to avoid zero division. Default: 1e-8.
Return:
torch.Tensor: the rotation in quaternion.
Shape:
- Input: :math:`(*, 3, 3)`
- Output: :math:`(*, 4)`
Example:
>>> input = torch.rand(4, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_quaternion(input) # Nx4 | kornia/geometry/conversions.py | rotation_matrix_to_quaternion | anthonytec2/kornia | python | def rotation_matrix_to_quaternion(rotation_matrix: torch.Tensor, eps: float=1e-08) -> torch.Tensor:
'Convert 3x3 rotation matrix to 4d quaternion vector.\n The quaternion vector has components in (x, y, z, w) format.\n\n Args:\n rotation_matrix (torch.Tensor): the rotation matrix to convert.\n eps (float): small value to avoid zero division. Default: 1e-8.\n\n Return:\n torch.Tensor: the rotation in quaternion.\n\n Shape:\n - Input: :math:`(*, 3, 3)`\n - Output: :math:`(*, 4)`\n\n Example:\n >>> input = torch.rand(4, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_quaternion(input) # Nx4\n '
if (not isinstance(rotation_matrix, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(rotation_matrix)))
if (not (rotation_matrix.shape[(- 2):] == (3, 3))):
raise ValueError('Input size must be a (*, 3, 3) tensor. Got {}'.format(rotation_matrix.shape))
def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor:
eps: float = torch.finfo(numerator.dtype).tiny
return (numerator / torch.clamp(denominator, min=eps))
rotation_matrix_vec: torch.Tensor = rotation_matrix.view(*rotation_matrix.shape[:(- 2)], 9)
(m00, m01, m02, m10, m11, m12, m20, m21, m22) = torch.chunk(rotation_matrix_vec, chunks=9, dim=(- 1))
trace: torch.Tensor = ((m00 + m11) + m22)
def trace_positive_cond():
sq = (torch.sqrt((trace + 1.0)) * 2.0)
qw = (0.25 * sq)
qx = safe_zero_division((m21 - m12), sq)
qy = safe_zero_division((m02 - m20), sq)
qz = safe_zero_division((m10 - m01), sq)
return torch.cat([qx, qy, qz, qw], dim=(- 1))
def cond_1():
sq = (torch.sqrt(((((1.0 + m00) - m11) - m22) + eps)) * 2.0)
qw = safe_zero_division((m21 - m12), sq)
qx = (0.25 * sq)
qy = safe_zero_division((m01 + m10), sq)
qz = safe_zero_division((m02 + m20), sq)
return torch.cat([qx, qy, qz, qw], dim=(- 1))
def cond_2():
sq = (torch.sqrt(((((1.0 + m11) - m00) - m22) + eps)) * 2.0)
qw = safe_zero_division((m02 - m20), sq)
qx = safe_zero_division((m01 + m10), sq)
qy = (0.25 * sq)
qz = safe_zero_division((m12 + m21), sq)
return torch.cat([qx, qy, qz, qw], dim=(- 1))
def cond_3():
sq = (torch.sqrt(((((1.0 + m22) - m00) - m11) + eps)) * 2.0)
qw = safe_zero_division((m10 - m01), sq)
qx = safe_zero_division((m02 + m20), sq)
qy = safe_zero_division((m12 + m21), sq)
qz = (0.25 * sq)
return torch.cat([qx, qy, qz, qw], dim=(- 1))
where_2 = torch.where((m11 > m22), cond_2(), cond_3())
where_1 = torch.where(((m00 > m11) & (m00 > m22)), cond_1(), where_2)
quaternion: torch.Tensor = torch.where((trace > 0.0), trace_positive_cond(), where_1)
return quaternion |
def normalize_quaternion(quaternion: torch.Tensor, eps: float=1e-12) -> torch.Tensor:
'Normalizes a quaternion.\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n normalized. The tensor can be of shape :math:`(*, 4)`.\n eps (float): small value to avoid division by zero.\n Default: 1e-12.\n\n Return:\n torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.\n\n Example:\n >>> quaternion = torch.tensor([1., 0., 1., 0.])\n >>> normalize_quaternion(quaternion)\n tensor([0.7071, 0.0000, 0.7071, 0.0000])\n '
if (not isinstance(quaternion, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 4)):
raise ValueError('Input must be a tensor of shape (*, 4). Got {}'.format(quaternion.shape))
return F.normalize(quaternion, p=2, dim=(- 1), eps=eps) | 7,512,849,630,321,726,000 | Normalizes a quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
normalized. The tensor can be of shape :math:`(*, 4)`.
eps (float): small value to avoid division by zero.
Default: 1e-12.
Return:
torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.
Example:
>>> quaternion = torch.tensor([1., 0., 1., 0.])
>>> normalize_quaternion(quaternion)
tensor([0.7071, 0.0000, 0.7071, 0.0000]) | kornia/geometry/conversions.py | normalize_quaternion | anthonytec2/kornia | python | def normalize_quaternion(quaternion: torch.Tensor, eps: float=1e-12) -> torch.Tensor:
'Normalizes a quaternion.\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n normalized. The tensor can be of shape :math:`(*, 4)`.\n eps (float): small value to avoid division by zero.\n Default: 1e-12.\n\n Return:\n torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.\n\n Example:\n >>> quaternion = torch.tensor([1., 0., 1., 0.])\n >>> normalize_quaternion(quaternion)\n tensor([0.7071, 0.0000, 0.7071, 0.0000])\n '
if (not isinstance(quaternion, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 4)):
raise ValueError('Input must be a tensor of shape (*, 4). Got {}'.format(quaternion.shape))
return F.normalize(quaternion, p=2, dim=(- 1), eps=eps) |
def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor:
'Converts a quaternion to a rotation matrix.\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 4)`.\n\n Return:\n torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`.\n\n Example:\n >>> quaternion = torch.tensor([0., 0., 1., 0.])\n >>> quaternion_to_rotation_matrix(quaternion)\n tensor([[-1., 0., 0.],\n [ 0., -1., 0.],\n [ 0., 0., 1.]])\n '
if (not isinstance(quaternion, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 4)):
raise ValueError('Input must be a tensor of shape (*, 4). Got {}'.format(quaternion.shape))
quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)
(x, y, z, w) = torch.chunk(quaternion_norm, chunks=4, dim=(- 1))
tx: torch.Tensor = (2.0 * x)
ty: torch.Tensor = (2.0 * y)
tz: torch.Tensor = (2.0 * z)
twx: torch.Tensor = (tx * w)
twy: torch.Tensor = (ty * w)
twz: torch.Tensor = (tz * w)
txx: torch.Tensor = (tx * x)
txy: torch.Tensor = (ty * x)
txz: torch.Tensor = (tz * x)
tyy: torch.Tensor = (ty * y)
tyz: torch.Tensor = (tz * y)
tzz: torch.Tensor = (tz * z)
one: torch.Tensor = torch.tensor(1.0)
matrix: torch.Tensor = torch.stack([(one - (tyy + tzz)), (txy - twz), (txz + twy), (txy + twz), (one - (txx + tzz)), (tyz - twx), (txz - twy), (tyz + twx), (one - (txx + tyy))], dim=(- 1)).view((- 1), 3, 3)
if (len(quaternion.shape) == 1):
matrix = torch.squeeze(matrix, dim=0)
return matrix | 3,522,370,856,670,667,300 | Converts a quaternion to a rotation matrix.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 4)`.
Return:
torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`.
Example:
>>> quaternion = torch.tensor([0., 0., 1., 0.])
>>> quaternion_to_rotation_matrix(quaternion)
tensor([[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., 1.]]) | kornia/geometry/conversions.py | quaternion_to_rotation_matrix | anthonytec2/kornia | python | def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor:
'Converts a quaternion to a rotation matrix.\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 4)`.\n\n Return:\n torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`.\n\n Example:\n >>> quaternion = torch.tensor([0., 0., 1., 0.])\n >>> quaternion_to_rotation_matrix(quaternion)\n tensor([[-1., 0., 0.],\n [ 0., -1., 0.],\n [ 0., 0., 1.]])\n '
if (not isinstance(quaternion, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 4)):
raise ValueError('Input must be a tensor of shape (*, 4). Got {}'.format(quaternion.shape))
quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)
(x, y, z, w) = torch.chunk(quaternion_norm, chunks=4, dim=(- 1))
tx: torch.Tensor = (2.0 * x)
ty: torch.Tensor = (2.0 * y)
tz: torch.Tensor = (2.0 * z)
twx: torch.Tensor = (tx * w)
twy: torch.Tensor = (ty * w)
twz: torch.Tensor = (tz * w)
txx: torch.Tensor = (tx * x)
txy: torch.Tensor = (ty * x)
txz: torch.Tensor = (tz * x)
tyy: torch.Tensor = (ty * y)
tyz: torch.Tensor = (tz * y)
tzz: torch.Tensor = (tz * z)
one: torch.Tensor = torch.tensor(1.0)
matrix: torch.Tensor = torch.stack([(one - (tyy + tzz)), (txy - twz), (txz + twy), (txy + twz), (one - (txx + tzz)), (tyz - twx), (txz - twy), (tyz + twx), (one - (txx + tyy))], dim=(- 1)).view((- 1), 3, 3)
if (len(quaternion.shape) == 1):
matrix = torch.squeeze(matrix, dim=0)
return matrix |
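A consistency sketch tying this record to rotation_matrix_to_quaternion above; both use the (x, y, z, w) layout, so the round trip returns the input up to the global sign ambiguity of quaternions (q and -q encode the same rotation):

import torch
import torch.nn.functional as F
from kornia.geometry.conversions import (
    quaternion_to_rotation_matrix, rotation_matrix_to_quaternion)

q = F.normalize(torch.randn(4, 4), dim=-1)   # random unit quaternions, (x, y, z, w)
q2 = rotation_matrix_to_quaternion(quaternion_to_rotation_matrix(q))
sign = torch.sign((q * q2).sum(dim=-1, keepdim=True))  # resolve q vs -q
assert torch.allclose(sign * q2, q, atol=1e-4)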
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
'Convert quaternion vector to angle axis of rotation.\n The quaternion should be in (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n quaternion (torch.Tensor): tensor with quaternions.\n\n Return:\n torch.Tensor: tensor with angle axis of rotation.\n\n Shape:\n - Input: :math:`(*, 4)` where `*` means any number of dimensions\n - Output: :math:`(*, 3)`\n\n Example:\n >>> quaternion = torch.rand(2, 4) # Nx4\n >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3\n '
if (not torch.is_tensor(quaternion)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 4)):
raise ValueError('Input must be a tensor of shape Nx4 or 4. Got {}'.format(quaternion.shape))
q1: torch.Tensor = quaternion[(..., 1)]
q2: torch.Tensor = quaternion[(..., 2)]
q3: torch.Tensor = quaternion[(..., 3)]
sin_squared_theta: torch.Tensor = (((q1 * q1) + (q2 * q2)) + (q3 * q3))
sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
cos_theta: torch.Tensor = quaternion[(..., 0)]
two_theta: torch.Tensor = (2.0 * torch.where((cos_theta < 0.0), torch.atan2((- sin_theta), (- cos_theta)), torch.atan2(sin_theta, cos_theta)))
k_pos: torch.Tensor = (two_theta / sin_theta)
k_neg: torch.Tensor = (2.0 * torch.ones_like(sin_theta))
k: torch.Tensor = torch.where((sin_squared_theta > 0.0), k_pos, k_neg)
angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
angle_axis[(..., 0)] += (q1 * k)
angle_axis[(..., 1)] += (q2 * k)
angle_axis[(..., 2)] += (q3 * k)
return angle_axis | -3,117,967,537,888,511,000 | Convert quaternion vector to angle axis of rotation.
The quaternion should be in (w, x, y, z) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
quaternion (torch.Tensor): tensor with quaternions.
Return:
torch.Tensor: tensor with angle axis of rotation.
Shape:
- Input: :math:`(*, 4)` where `*` means any number of dimensions
- Output: :math:`(*, 3)`
Example:
>>> quaternion = torch.rand(2, 4) # Nx4
>>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 | kornia/geometry/conversions.py | quaternion_to_angle_axis | anthonytec2/kornia | python | def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
'Convert quaternion vector to angle axis of rotation.\n The quaternion should be in (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n quaternion (torch.Tensor): tensor with quaternions.\n\n Return:\n torch.Tensor: tensor with angle axis of rotation.\n\n Shape:\n - Input: :math:`(*, 4)` where `*` means any number of dimensions\n - Output: :math:`(*, 3)`\n\n Example:\n >>> quaternion = torch.rand(2, 4) # Nx4\n >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3\n '
if (not torch.is_tensor(quaternion)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 4)):
raise ValueError('Input must be a tensor of shape Nx4 or 4. Got {}'.format(quaternion.shape))
q1: torch.Tensor = quaternion[(..., 1)]
q2: torch.Tensor = quaternion[(..., 2)]
q3: torch.Tensor = quaternion[(..., 3)]
sin_squared_theta: torch.Tensor = (((q1 * q1) + (q2 * q2)) + (q3 * q3))
sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
cos_theta: torch.Tensor = quaternion[(..., 0)]
two_theta: torch.Tensor = (2.0 * torch.where((cos_theta < 0.0), torch.atan2((- sin_theta), (- cos_theta)), torch.atan2(sin_theta, cos_theta)))
k_pos: torch.Tensor = (two_theta / sin_theta)
k_neg: torch.Tensor = (2.0 * torch.ones_like(sin_theta))
k: torch.Tensor = torch.where((sin_squared_theta > 0.0), k_pos, k_neg)
angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
angle_axis[(..., 0)] += (q1 * k)
angle_axis[(..., 1)] += (q2 * k)
angle_axis[(..., 2)] += (q3 * k)
return angle_axis |
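A worked instance that matches the indexing in the body, where element 0 is the scalar part: a 90 degree rotation about z should come back as the rotation vector (0, 0, pi/2). Sketch under the same import assumption.

import math
import torch
from kornia.geometry.conversions import quaternion_to_angle_axis

half = math.pi / 4   # half of the 90 degree rotation angle
q = torch.tensor([math.cos(half), 0.0, 0.0, math.sin(half)])  # (w, x, y, z)
assert torch.allclose(
    quaternion_to_angle_axis(q), torch.tensor([0.0, 0.0, math.pi / 2]), atol=1e-6)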
def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float=1e-08) -> torch.Tensor:
'Applies exponential map to log quaternion.\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 3)`.\n\n Return:\n torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`.\n\n Example:\n >>> quaternion = torch.tensor([0., 0., 0.])\n >>> quaternion_log_to_exp(quaternion)\n tensor([0., 0., 0., 1.])\n '
if (not isinstance(quaternion, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 3)):
raise ValueError('Input must be a tensor of shape (*, 3). Got {}'.format(quaternion.shape))
norm_q: torch.Tensor = torch.norm(quaternion, p=2, dim=(- 1), keepdim=True).clamp(min=eps)
quaternion_vector: torch.Tensor = ((quaternion * torch.sin(norm_q)) / norm_q)
quaternion_scalar: torch.Tensor = torch.cos(norm_q)
quaternion_exp: torch.Tensor = torch.cat([quaternion_vector, quaternion_scalar], dim=(- 1))
return quaternion_exp | -2,785,614,319,673,772,500 | Applies exponential map to log quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 3)`.
Return:
torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`.
Example:
>>> quaternion = torch.tensor([0., 0., 0.])
>>> quaternion_log_to_exp(quaternion)
tensor([0., 0., 0., 1.]) | kornia/geometry/conversions.py | quaternion_log_to_exp | anthonytec2/kornia | python | def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float=1e-08) -> torch.Tensor:
'Applies exponential map to log quaternion.\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 3)`.\n\n Return:\n torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`.\n\n Example:\n >>> quaternion = torch.tensor([0., 0., 0.])\n >>> quaternion_log_to_exp(quaternion)\n tensor([0., 0., 0., 1.])\n '
if (not isinstance(quaternion, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 3)):
raise ValueError('Input must be a tensor of shape (*, 3). Got {}'.format(quaternion.shape))
norm_q: torch.Tensor = torch.norm(quaternion, p=2, dim=(- 1), keepdim=True).clamp(min=eps)
quaternion_vector: torch.Tensor = ((quaternion * torch.sin(norm_q)) / norm_q)
quaternion_scalar: torch.Tensor = torch.cos(norm_q)
quaternion_exp: torch.Tensor = torch.cat([quaternion_vector, quaternion_scalar], dim=(- 1))
return quaternion_exp |
def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float=1e-08) -> torch.Tensor:
'Applies the log map to a quaternion.\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 4)`.\n\n Return:\n torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.\n\n Example:\n >>> quaternion = torch.tensor([0., 0., 0., 1.])\n >>> quaternion_exp_to_log(quaternion)\n tensor([0., 0., 0.])\n '
if (not isinstance(quaternion, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 4)):
raise ValueError('Input must be a tensor of shape (*, 4). Got {}'.format(quaternion.shape))
quaternion_vector: torch.Tensor = quaternion[..., 0:3]
quaternion_scalar: torch.Tensor = quaternion[..., 3:4]
norm_q: torch.Tensor = torch.norm(quaternion_vector, p=2, dim=(- 1), keepdim=True).clamp(min=eps)
quaternion_log: torch.Tensor = ((quaternion_vector * torch.acos(torch.clamp(quaternion_scalar, min=(- 1.0), max=1.0))) / norm_q)
return quaternion_log | 769,276,519,921,463,600 | Applies the log map to a quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 4)`.
Return:
torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.
Example:
>>> quaternion = torch.tensor([0., 0., 0., 1.])
>>> quaternion_exp_to_log(quaternion)
tensor([0., 0., 0.]) | kornia/geometry/conversions.py | quaternion_exp_to_log | anthonytec2/kornia | python | def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float=1e-08) -> torch.Tensor:
'Applies the log map to a quaternion.\n The quaternion should be in (x, y, z, w) format.\n\n Args:\n quaternion (torch.Tensor): a tensor containing a quaternion to be\n converted. The tensor can be of shape :math:`(*, 4)`.\n\n Return:\n torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.\n\n Example:\n >>> quaternion = torch.tensor([0., 0., 0., 1.])\n >>> quaternion_exp_to_log(quaternion)\n tensor([0., 0., 0.])\n '
if (not isinstance(quaternion, torch.Tensor)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(quaternion)))
if (not (quaternion.shape[(- 1)] == 4)):
raise ValueError('Input must be a tensor of shape (*, 4). Got {}'.format(quaternion.shape))
quaternion_vector: torch.Tensor = quaternion[..., 0:3]
quaternion_scalar: torch.Tensor = quaternion[..., 3:4]
norm_q: torch.Tensor = torch.norm(quaternion_vector, p=2, dim=(- 1), keepdim=True).clamp(min=eps)
quaternion_log: torch.Tensor = ((quaternion_vector * torch.acos(torch.clamp(quaternion_scalar, min=(- 1.0), max=1.0))) / norm_q)
return quaternion_log |
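The two records above are inverse maps as long as the log-quaternion norm stays below pi; a round-trip sketch:

import torch
from kornia.geometry.conversions import quaternion_exp_to_log, quaternion_log_to_exp

v = 0.5 * torch.rand(2, 3) + 0.01   # small positive norm, well below pi
q = quaternion_log_to_exp(v)        # unit quaternions, (x, y, z, w)
assert torch.allclose(quaternion_exp_to_log(q), v, atol=1e-5)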
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
'Convert an angle axis to a quaternion.\n The quaternion vector has components in (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n angle_axis (torch.Tensor): tensor with angle axis.\n\n Return:\n torch.Tensor: tensor with quaternion.\n\n Shape:\n - Input: :math:`(*, 3)` where `*` means any number of dimensions\n - Output: :math:`(*, 4)`\n\n Example:\n >>> angle_axis = torch.rand(2, 3) # Nx3\n >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4\n '
if (not torch.is_tensor(angle_axis)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(angle_axis)))
if (not (angle_axis.shape[(- 1)] == 3)):
raise ValueError('Input must be a tensor of shape Nx3 or 3. Got {}'.format(angle_axis.shape))
a0: torch.Tensor = angle_axis[..., 0:1]
a1: torch.Tensor = angle_axis[..., 1:2]
a2: torch.Tensor = angle_axis[..., 2:3]
theta_squared: torch.Tensor = (((a0 * a0) + (a1 * a1)) + (a2 * a2))
theta: torch.Tensor = torch.sqrt(theta_squared)
half_theta: torch.Tensor = (theta * 0.5)
mask: torch.Tensor = (theta_squared > 0.0)
ones: torch.Tensor = torch.ones_like(half_theta)
k_neg: torch.Tensor = (0.5 * ones)
k_pos: torch.Tensor = (torch.sin(half_theta) / theta)
k: torch.Tensor = torch.where(mask, k_pos, k_neg)
w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
quaternion: torch.Tensor = torch.zeros_like(angle_axis)
quaternion[..., 0:1] += (a0 * k)
quaternion[..., 1:2] += (a1 * k)
quaternion[..., 2:3] += (a2 * k)
return torch.cat([w, quaternion], dim=(- 1)) | -4,953,389,899,023,492,000 | Convert an angle axis to a quaternion.
The quaternion vector has components in (w, x, y, z) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
angle_axis (torch.Tensor): tensor with angle axis.
Return:
torch.Tensor: tensor with quaternion.
Shape:
- Input: :math:`(*, 3)` where `*` means any number of dimensions
- Output: :math:`(*, 4)`
Example:
>>> angle_axis = torch.rand(2, 3) # Nx3
>>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 | kornia/geometry/conversions.py | angle_axis_to_quaternion | anthonytec2/kornia | python | def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
'Convert an angle axis to a quaternion.\n The quaternion vector has components in (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n angle_axis (torch.Tensor): tensor with angle axis.\n\n Return:\n torch.Tensor: tensor with quaternion.\n\n Shape:\n - Input: :math:`(*, 3)` where `*` means any number of dimensions\n - Output: :math:`(*, 4)`\n\n Example:\n >>> angle_axis = torch.rand(2, 3) # Nx3\n >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4\n '
if (not torch.is_tensor(angle_axis)):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(type(angle_axis)))
if (not (angle_axis.shape[(- 1)] == 3)):
raise ValueError('Input must be a tensor of shape Nx3 or 3. Got {}'.format(angle_axis.shape))
a0: torch.Tensor = angle_axis[..., 0:1]
a1: torch.Tensor = angle_axis[..., 1:2]
a2: torch.Tensor = angle_axis[..., 2:3]
theta_squared: torch.Tensor = (((a0 * a0) + (a1 * a1)) + (a2 * a2))
theta: torch.Tensor = torch.sqrt(theta_squared)
half_theta: torch.Tensor = (theta * 0.5)
mask: torch.Tensor = (theta_squared > 0.0)
ones: torch.Tensor = torch.ones_like(half_theta)
k_neg: torch.Tensor = (0.5 * ones)
k_pos: torch.Tensor = (torch.sin(half_theta) / theta)
k: torch.Tensor = torch.where(mask, k_pos, k_neg)
w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
quaternion: torch.Tensor = torch.zeros_like(angle_axis)
quaternion[..., 0:1] += (a0 * k)
quaternion[..., 1:2] += (a1 * k)
quaternion[..., 2:3] += (a2 * k)
return torch.cat([w, quaternion], dim=(- 1)) |
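angle_axis_to_quaternion and quaternion_to_angle_axis share the scalar-first layout (the final torch.cat([w, ...]) above), so they invert each other for rotation angles below pi:

import torch
from kornia.geometry.conversions import (
    angle_axis_to_quaternion, quaternion_to_angle_axis)

aa = torch.rand(2, 3)              # rotation vectors, angle < sqrt(3) < pi
q = angle_axis_to_quaternion(aa)   # (w, x, y, z)
assert torch.allclose(quaternion_to_angle_axis(q), aa, atol=1e-5)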
def normalize_pixel_coordinates(pixel_coordinates: torch.Tensor, height: int, width: int, eps: float=1e-08) -> torch.Tensor:
'Normalize pixel coordinates between -1 and 1.\n\n Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the grid with pixel coordinates.\n Shape can be :math:`(*, 2)`.\n width (int): the maximum width in the x-axis.\n height (int): the maximum height in the y-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the normalized pixel coordinates.\n '
if (pixel_coordinates.shape[(- 1)] != 2):
raise ValueError('Input pixel_coordinates must be of shape (*, 2). Got {}'.format(pixel_coordinates.shape))
hw: torch.Tensor = torch.stack([torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype)])
factor: torch.Tensor = (torch.tensor(2.0, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps))
return ((factor * pixel_coordinates) - 1) | 5,259,801,237,466,521,000 | Normalize pixel coordinates between -1 and 1.
Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
Shape can be :math:`(*, 2)`.
width (int): the maximum width in the x-axis.
height (int): the maximum height in the y-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the normalized pixel coordinates. | kornia/geometry/conversions.py | normalize_pixel_coordinates | anthonytec2/kornia | python | def normalize_pixel_coordinates(pixel_coordinates: torch.Tensor, height: int, width: int, eps: float=1e-08) -> torch.Tensor:
'Normalize pixel coordinates between -1 and 1.\n\n Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the grid with pixel coordinates.\n Shape can be :math:`(*, 2)`.\n width (int): the maximum width in the x-axis.\n height (int): the maximum height in the y-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the normalized pixel coordinates.\n '
if (pixel_coordinates.shape[(- 1)] != 2):
raise ValueError('Input pixel_coordinates must be of shape (*, 2). Got {}'.format(pixel_coordinates.shape))
hw: torch.Tensor = torch.stack([torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype)])
factor: torch.Tensor = (torch.tensor(2.0, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps))
return ((factor * pixel_coordinates) - 1) |
def denormalize_pixel_coordinates(pixel_coordinates: torch.Tensor, height: int, width: int, eps: float=1e-08) -> torch.Tensor:
'Denormalize pixel coordinates.\n\n The input is assumed to be -1 if on extreme left, 1 if on\n extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the normalized grid coordinates.\n Shape can be :math:`(*, 2)`.\n width (int): the maximum width in the x-axis.\n height (int): the maximum height in the y-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the denormalized pixel coordinates.\n '
if (pixel_coordinates.shape[(- 1)] != 2):
raise ValueError('Input pixel_coordinates must be of shape (*, 2). Got {}'.format(pixel_coordinates.shape))
hw: torch.Tensor = torch.stack([torch.tensor(width), torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = (torch.tensor(2.0) / (hw - 1).clamp(eps))
return ((torch.tensor(1.0) / factor) * (pixel_coordinates + 1)) | 4,021,415,155,370,516,000 | Denormalize pixel coordinates.
The input is assumed to be -1 if on extreme left, 1 if on
extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the normalized grid coordinates.
Shape can be :math:`(*, 2)`.
width (int): the maximum width in the x-axis.
height (int): the maximum height in the y-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the denormalized pixel coordinates. | kornia/geometry/conversions.py | denormalize_pixel_coordinates | anthonytec2/kornia | python | def denormalize_pixel_coordinates(pixel_coordinates: torch.Tensor, height: int, width: int, eps: float=1e-08) -> torch.Tensor:
'Denormalize pixel coordinates.\n\n The input is assumed to be -1 if on extreme left, 1 if on\n extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the normalized grid coordinates.\n Shape can be :math:`(*, 2)`.\n width (int): the maximum width in the x-axis.\n height (int): the maximum height in the y-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the denormalized pixel coordinates.\n '
if (pixel_coordinates.shape[(- 1)] != 2):
raise ValueError('Input pixel_coordinates must be of shape (*, 2). Got {}'.format(pixel_coordinates.shape))
hw: torch.Tensor = torch.stack([torch.tensor(width), torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = (torch.tensor(2.0) / (hw - 1).clamp(eps))
return ((torch.tensor(1.0) / factor) * (pixel_coordinates + 1)) |
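A worked check of the affine map in the two pixel-coordinate records, with height = width = 5: the factor is 2 / (5 - 1) = 0.5, so pixel 0 maps to -1, pixel 2 to 0, pixel 4 to +1, and denormalizing undoes it exactly.

import torch
from kornia.geometry.conversions import (
    denormalize_pixel_coordinates, normalize_pixel_coordinates)

grid = torch.tensor([[0., 0.], [2., 2.], [4., 4.]])
norm = normalize_pixel_coordinates(grid, height=5, width=5)
assert torch.allclose(norm, torch.tensor([[-1., -1.], [0., 0.], [1., 1.]]))
assert torch.allclose(denormalize_pixel_coordinates(norm, height=5, width=5), grid)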
def normalize_pixel_coordinates3d(pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float=1e-08) -> torch.Tensor:
'Normalize pixel coordinates between -1 and 1.\n\n Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the grid with pixel coordinates.\n Shape can be :math:`(*, 3)`.\n depth (int): the maximum depth in the z-axis.\n height (int): the maximum height in the y-axis.\n width (int): the maximum width in the x-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the normalized pixel coordinates.\n '
if (pixel_coordinates.shape[(- 1)] != 3):
raise ValueError('Input pixel_coordinates must be of shape (*, 3). Got {}'.format(pixel_coordinates.shape))
dhw: torch.Tensor = torch.stack([torch.tensor(depth), torch.tensor(width), torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = (torch.tensor(2.0) / (dhw - 1).clamp(eps))
return ((factor * pixel_coordinates) - 1) | -7,054,624,372,842,433,000 | Normalize pixel coordinates between -1 and 1.
Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
Shape can be :math:`(*, 3)`.
depth (int): the maximum depth in the z-axis.
height (int): the maximum height in the y-axis.
width (int): the maximum width in the x-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the normalized pixel coordinates. | kornia/geometry/conversions.py | normalize_pixel_coordinates3d | anthonytec2/kornia | python | def normalize_pixel_coordinates3d(pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float=1e-08) -> torch.Tensor:
'Normalize pixel coordinates between -1 and 1.\n\n Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the grid with pixel coordinates.\n Shape can be :math:`(*, 3)`.\n depth (int): the maximum depth in the z-axis.\n height (int): the maximum height in the y-axis.\n width (int): the maximum width in the x-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the normalized pixel coordinates.\n '
if (pixel_coordinates.shape[(- 1)] != 3):
raise ValueError('Input pixel_coordinates must be of shape (*, 3). Got {}'.format(pixel_coordinates.shape))
dhw: torch.Tensor = torch.stack([torch.tensor(depth), torch.tensor(width), torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = (torch.tensor(2.0) / (dhw - 1).clamp(eps))
return ((factor * pixel_coordinates) - 1) |
def denormalize_pixel_coordinates3d(pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float=1e-08) -> torch.Tensor:
'Denormalize pixel coordinates.\n\n The input is assumed to be -1 if on extreme left, 1 if on\n extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the normalized grid coordinates.\n Shape can be :math:`(*, 3)`.\n depth (int): the maximum depth in the z-axis.\n height (int): the maximum height in the y-axis.\n width (int): the maximum width in the x-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the denormalized pixel coordinates.\n '
if (pixel_coordinates.shape[(- 1)] != 3):
raise ValueError('Input pixel_coordinates must be of shape (*, 3). Got {}'.format(pixel_coordinates.shape))
dhw: torch.Tensor = torch.stack([torch.tensor(depth), torch.tensor(width), torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = (torch.tensor(2.0) / (dhw - 1).clamp(eps))
return ((torch.tensor(1.0) / factor) * (pixel_coordinates + 1)) | 9,005,497,196,688,565,000 | Denormalize pixel coordinates.
The input is assumed to be -1 if on extreme left, 1 if on
extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the normalized grid coordinates.
Shape can be :math:`(*, 3)`.
depth (int): the maximum depth in the z-axis.
height (int): the maximum height in the y-axis.
width (int): the maximum width in the x-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the denormalized pixel coordinates. | kornia/geometry/conversions.py | denormalize_pixel_coordinates3d | anthonytec2/kornia | python | def denormalize_pixel_coordinates3d(pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float=1e-08) -> torch.Tensor:
'Denormalize pixel coordinates.\n\n The input is assumed to be -1 if on extreme left, 1 if on\n extreme right (x = w-1).\n\n Args:\n pixel_coordinates (torch.Tensor): the normalized grid coordinates.\n Shape can be :math:`(*, 3)`.\n depth (int): the maximum depth in the z-axis.\n height (int): the maximum height in the y-axis.\n width (int): the maximum width in the x-axis.\n eps (float): safe division by zero. (default 1e-8).\n\n Return:\n torch.Tensor: the denormalized pixel coordinates.\n '
if (pixel_coordinates.shape[(- 1)] != 3):
raise ValueError('Input pixel_coordinates must be of shape (*, 3). Got {}'.format(pixel_coordinates.shape))
dhw: torch.Tensor = torch.stack([torch.tensor(depth), torch.tensor(width), torch.tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = (torch.tensor(2.0) / (dhw - 1).clamp(eps))
return ((torch.tensor(1.0) / factor) * (pixel_coordinates + 1)) |
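The 3d variants apply the same per-axis affine map; note both stack the extents in the identical (depth, width, height) order, so the pair is self-consistent and round trips regardless of that ordering. Using equal extents sidesteps the question entirely:

import torch
from kornia.geometry.conversions import (
    denormalize_pixel_coordinates3d, normalize_pixel_coordinates3d)

grid = 7.0 * torch.rand(2, 5, 3)
norm = normalize_pixel_coordinates3d(grid, depth=8, height=8, width=8)
assert torch.allclose(
    denormalize_pixel_coordinates3d(norm, depth=8, height=8, width=8), grid, atol=1e-5)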
def _set_group_flag(self):
'Set flag according to image aspect ratio.\n\n All images are assigned to group 0; aspect-ratio based grouping is\n not used for this classification dataset.\n '
self.flag = np.zeros(len(self), dtype=np.uint8) | 1,523,723,425,331,464,400 | Set flag according to image aspect ratio.
All images are assigned to group 0; aspect-ratio based grouping is
not used for this classification dataset. | mmdet/datasets/classify/imagenet.py | _set_group_flag | anorthman/mmdetection | python | def _set_group_flag(self):
'Set flag according to image aspect ratio.\n\n All images are assigned to group 0; aspect-ratio based grouping is\n not used for this classification dataset.\n '
self.flag = np.zeros(len(self), dtype=np.uint8) |
def _compute_total_loss(self, labels, logits):
'Summation of the categorical hinge loss for labels and logits.'
error = 0.0
for (label, logit) in zip(labels, logits):
positive = (label * logit)
negative = ((1 - label) * logit)
error += np.maximum(0.0, ((negative - positive) + 1.0))
return error | -3,066,329,895,701,368,300 | Summation of the categorical hinge loss for labels and logits. | utils/train_eval_test.py | _compute_total_loss | AakashOfficial/tensor2robot | python | def _compute_total_loss(self, labels, logits):
error = 0.0
for (label, logit) in zip(labels, logits):
positive = (label * logit)
negative = ((1 - label) * logit)
error += np.maximum(0.0, ((negative - positive) + 1.0))
return error |
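A worked instance of the helper above, mirroring its body exactly in plain numpy: the hinge is applied element-wise and accumulated over the label/logit pairs.

import numpy as np

labels = [np.array([0.0, 1.0])]   # one-hot label for class 1
logits = [np.array([-2.0, 3.0])]
# positive = label * logit = [0, 3]; negative = (1 - label) * logit = [-2, 0]
# max(0, negative - positive + 1) = max(0, [-1, -2]) = [0, 0], i.e. zero loss
error = 0.0
for label, logit in zip(labels, logits):
    error += np.maximum(0.0, (1 - label) * logit - label * logit + 1.0)
assert np.array_equal(error, np.array([0.0, 0.0]))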
def test_train_eval_model(self):
'Tests that a simple model trains and exported models are valid.'
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
mock_input_generator_eval = mocks.MockInputGenerator(batch_size=1)
fake_hook_builder = FakeHookBuilder()
train_eval.train_eval_model(t2r_model=mock_t2r_model, input_generator_train=mock_input_generator_train, input_generator_eval=mock_input_generator_eval, max_train_steps=_MAX_TRAIN_STEPS, model_dir=model_dir, train_hook_builders=[fake_hook_builder], eval_hook_builders=[fake_hook_builder], eval_steps=_EVAL_STEPS, eval_throttle_secs=_EVAL_THROTTLE_SECS, create_exporters_fn=train_eval.create_default_exporters)
self.assertTrue(fake_hook_builder.hook_mock.begin.called)
best_exporter_numpy_path = os.path.join(model_dir, 'export', 'best_exporter_numpy', '*')
numpy_model_paths = sorted(tf.io.gfile.glob(best_exporter_numpy_path))
self.assertGreater(len(numpy_model_paths), 0)
self.assertLessEqual(len(numpy_model_paths), 5)
best_exporter_tf_example_path = os.path.join(model_dir, 'export', 'best_exporter_tf_example', '*')
tf_example_model_paths = sorted(tf.io.gfile.glob(best_exporter_tf_example_path))
self.assertGreater(len(tf_example_model_paths), 0)
self.assertLessEqual(len(tf_example_model_paths), 5)
estimator_predict = tf.estimator.Estimator(model_fn=mock_t2r_model.model_fn, config=tf.estimator.RunConfig(model_dir=model_dir))
prediction_ref = estimator_predict.predict(input_fn=mock_input_generator_eval.create_dataset_input_fn(mode=tf.estimator.ModeKeys.EVAL))
numpy_predictor_fn = contrib_predictor.from_saved_model(numpy_model_paths[(- 1)])
(features, labels) = mock_input_generator_eval.create_numpy_data()
ref_error = self._compute_total_loss(labels, [val['logit'].flatten() for val in prediction_ref])
numpy_predictions = []
for (feature, label) in zip(features, labels):
predicted = numpy_predictor_fn({'x': feature.reshape(1, (- 1))})['logit'].flatten()
numpy_predictions.append(predicted)
if (label > 0):
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
numpy_error = self._compute_total_loss(labels, numpy_predictions)
tf_example_predictor_fn = contrib_predictor.from_saved_model(tf_example_model_paths[(- 1)])
tf_example_predictions = []
for (feature, label) in zip(features, labels):
example = tf.train.Example()
example.features.feature['measured_position'].float_list.value.extend(feature)
feed_dict = {'input_example_tensor': np.array(example.SerializeToString()).reshape(1)}
predicted = tf_example_predictor_fn(feed_dict)['logit'].flatten()
tf_example_predictions.append(predicted)
if (label > 0):
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
tf_example_error = self._compute_total_loss(labels, tf_example_predictions)
np.testing.assert_almost_equal(tf_example_error, numpy_error)
np.testing.assert_almost_equal(ref_error, tf_example_error, decimal=3) | -5,027,032,026,927,736,000 | Tests that a simple model trains and exported models are valid. | utils/train_eval_test.py | test_train_eval_model | AakashOfficial/tensor2robot | python | def test_train_eval_model(self):
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
mock_input_generator_eval = mocks.MockInputGenerator(batch_size=1)
fake_hook_builder = FakeHookBuilder()
train_eval.train_eval_model(t2r_model=mock_t2r_model, input_generator_train=mock_input_generator_train, input_generator_eval=mock_input_generator_eval, max_train_steps=_MAX_TRAIN_STEPS, model_dir=model_dir, train_hook_builders=[fake_hook_builder], eval_hook_builders=[fake_hook_builder], eval_steps=_EVAL_STEPS, eval_throttle_secs=_EVAL_THROTTLE_SECS, create_exporters_fn=train_eval.create_default_exporters)
self.assertTrue(fake_hook_builder.hook_mock.begin.called)
best_exporter_numpy_path = os.path.join(model_dir, 'export', 'best_exporter_numpy', '*')
numpy_model_paths = sorted(tf.io.gfile.glob(best_exporter_numpy_path))
self.assertGreater(len(numpy_model_paths), 0)
self.assertLessEqual(len(numpy_model_paths), 5)
best_exporter_tf_example_path = os.path.join(model_dir, 'export', 'best_exporter_tf_example', '*')
tf_example_model_paths = sorted(tf.io.gfile.glob(best_exporter_tf_example_path))
self.assertGreater(len(tf_example_model_paths), 0)
self.assertLessEqual(len(tf_example_model_paths), 5)
estimator_predict = tf.estimator.Estimator(model_fn=mock_t2r_model.model_fn, config=tf.estimator.RunConfig(model_dir=model_dir))
prediction_ref = estimator_predict.predict(input_fn=mock_input_generator_eval.create_dataset_input_fn(mode=tf.estimator.ModeKeys.EVAL))
numpy_predictor_fn = contrib_predictor.from_saved_model(numpy_model_paths[(- 1)])
(features, labels) = mock_input_generator_eval.create_numpy_data()
ref_error = self._compute_total_loss(labels, [val['logit'].flatten() for val in prediction_ref])
numpy_predictions = []
for (feature, label) in zip(features, labels):
predicted = numpy_predictor_fn({'x': feature.reshape(1, (- 1))})['logit'].flatten()
numpy_predictions.append(predicted)
if (label > 0):
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
numpy_error = self._compute_total_loss(labels, numpy_predictions)
tf_example_predictor_fn = contrib_predictor.from_saved_model(tf_example_model_paths[(- 1)])
tf_example_predictions = []
for (feature, label) in zip(features, labels):
example = tf.train.Example()
example.features.feature['measured_position'].float_list.value.extend(feature)
feed_dict = {'input_example_tensor': np.array(example.SerializeToString()).reshape(1)}
predicted = tf_example_predictor_fn(feed_dict)['logit'].flatten()
tf_example_predictions.append(predicted)
if (label > 0):
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
tf_example_error = self._compute_total_loss(labels, tf_example_predictions)
np.testing.assert_almost_equal(tf_example_error, numpy_error)
np.testing.assert_almost_equal(ref_error, tf_example_error, decimal=3) |
def test_init_from_checkpoint_global_step(self):
    'Tests that initializing from a checkpoint also restores the global step.'
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
train_eval.train_eval_model(t2r_model=mock_t2r_model, input_generator_train=mock_input_generator_train, max_train_steps=_MAX_TRAIN_STEPS, model_dir=model_dir, eval_steps=_EVAL_STEPS, eval_throttle_secs=_EVAL_THROTTLE_SECS, create_exporters_fn=train_eval.create_default_exporters)
self.assertLen(tf.io.gfile.glob(os.path.join(model_dir, 'model*.meta')), 3)
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor, init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
train_eval.train_eval_model(t2r_model=continue_mock_t2r_model, input_generator_train=continue_mock_input_generator_train, model_dir=continue_model_dir, max_train_steps=(_MAX_TRAIN_STEPS + 100), eval_steps=_EVAL_STEPS, eval_throttle_secs=_EVAL_THROTTLE_SECS, create_exporters_fn=train_eval.create_default_exporters)
    self.assertLen(tf.io.gfile.glob(os.path.join(continue_model_dir, 'model*.meta')), 2) | -3,967,083,315,317,678,000 | Tests that initializing from a checkpoint also restores the global step. | utils/train_eval_test.py | test_init_from_checkpoint_global_step | AakashOfficial/tensor2robot | python | def test_init_from_checkpoint_global_step(self):
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
train_eval.train_eval_model(t2r_model=mock_t2r_model, input_generator_train=mock_input_generator_train, max_train_steps=_MAX_TRAIN_STEPS, model_dir=model_dir, eval_steps=_EVAL_STEPS, eval_throttle_secs=_EVAL_THROTTLE_SECS, create_exporters_fn=train_eval.create_default_exporters)
self.assertLen(tf.io.gfile.glob(os.path.join(model_dir, 'model*.meta')), 3)
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor, init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
train_eval.train_eval_model(t2r_model=continue_mock_t2r_model, input_generator_train=continue_mock_input_generator_train, model_dir=continue_model_dir, max_train_steps=(_MAX_TRAIN_STEPS + 100), eval_steps=_EVAL_STEPS, eval_throttle_secs=_EVAL_THROTTLE_SECS, create_exporters_fn=train_eval.create_default_exporters)
self.assertLen(tf.io.gfile.glob(os.path.join(continue_model_dir, 'model*.meta')), 2) |
def test_init_from_checkpoint_use_avg_model_params_and_weights(self):
    'Tests that init_from_checkpoint with use_avg_model_params restores the averaged weights and reproduces the original predictions.'
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor, use_avg_model_params=True)
mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
mock_input_generator = mocks.MockInputGenerator(batch_size=1)
mock_input_generator.set_specification_from_model(mock_t2r_model, tf.estimator.ModeKeys.TRAIN)
train_eval.train_eval_model(t2r_model=mock_t2r_model, input_generator_train=mock_input_generator_train, max_train_steps=_MAX_TRAIN_STEPS, model_dir=model_dir)
init_checkpoint = tf.train.NewCheckpointReader(tf.train.latest_checkpoint(model_dir))
initial_estimator_predict = tf.estimator.Estimator(model_fn=mock_t2r_model.model_fn, config=tf.estimator.RunConfig(model_dir=model_dir))
initial_predictions = [prediction['logit'] for prediction in list(initial_estimator_predict.predict(input_fn=mock_input_generator.create_dataset_input_fn(mode=tf.estimator.ModeKeys.EVAL)))]
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor, init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
train_eval.train_eval_model(t2r_model=continue_mock_t2r_model, input_generator_train=continue_mock_input_generator_train, model_dir=continue_model_dir, max_train_steps=_MAX_TRAIN_STEPS)
continue_checkpoint = tf.train.NewCheckpointReader(tf.train.latest_checkpoint(continue_model_dir))
for (tensor_name, _) in tf.train.list_variables(model_dir):
if ('ExponentialMovingAverage' in tensor_name):
continue
if ('Adam' in tensor_name):
continue
if ('global_step' in tensor_name):
continue
self.assertAllClose(init_checkpoint.get_tensor(tensor_name), continue_checkpoint.get_tensor(tensor_name), atol=0.001)
continue_estimator_predict = tf.estimator.Estimator(model_fn=mock_t2r_model.model_fn, config=tf.estimator.RunConfig(model_dir=continue_model_dir))
continue_predictions = [prediction['logit'] for prediction in list(continue_estimator_predict.predict(input_fn=mock_input_generator.create_dataset_input_fn(mode=tf.estimator.ModeKeys.EVAL)))]
self.assertTrue(np.allclose(initial_predictions, continue_predictions, atol=0.1))
random_estimator_predict = tf.estimator.Estimator(model_fn=mock_t2r_model.model_fn)
random_predictions = [prediction['logit'] for prediction in list(random_estimator_predict.predict(input_fn=mock_input_generator.create_dataset_input_fn(mode=tf.estimator.ModeKeys.EVAL)))]
    self.assertFalse(np.allclose(initial_predictions, random_predictions, atol=0.01)) | 4,479,116,241,257,387,500 | Tests that init_from_checkpoint with use_avg_model_params restores the averaged weights and reproduces the original predictions. | utils/train_eval_test.py | test_init_from_checkpoint_use_avg_model_params_and_weights | AakashOfficial/tensor2robot | python | def test_init_from_checkpoint_use_avg_model_params_and_weights(self):
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor, use_avg_model_params=True)
mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
mock_input_generator = mocks.MockInputGenerator(batch_size=1)
mock_input_generator.set_specification_from_model(mock_t2r_model, tf.estimator.ModeKeys.TRAIN)
train_eval.train_eval_model(t2r_model=mock_t2r_model, input_generator_train=mock_input_generator_train, max_train_steps=_MAX_TRAIN_STEPS, model_dir=model_dir)
init_checkpoint = tf.train.NewCheckpointReader(tf.train.latest_checkpoint(model_dir))
initial_estimator_predict = tf.estimator.Estimator(model_fn=mock_t2r_model.model_fn, config=tf.estimator.RunConfig(model_dir=model_dir))
initial_predictions = [prediction['logit'] for prediction in list(initial_estimator_predict.predict(input_fn=mock_input_generator.create_dataset_input_fn(mode=tf.estimator.ModeKeys.EVAL)))]
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(preprocessor_cls=noop_preprocessor.NoOpPreprocessor, init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
train_eval.train_eval_model(t2r_model=continue_mock_t2r_model, input_generator_train=continue_mock_input_generator_train, model_dir=continue_model_dir, max_train_steps=_MAX_TRAIN_STEPS)
continue_checkpoint = tf.train.NewCheckpointReader(tf.train.latest_checkpoint(continue_model_dir))
for (tensor_name, _) in tf.train.list_variables(model_dir):
if ('ExponentialMovingAverage' in tensor_name):
continue
if ('Adam' in tensor_name):
continue
if ('global_step' in tensor_name):
continue
self.assertAllClose(init_checkpoint.get_tensor(tensor_name), continue_checkpoint.get_tensor(tensor_name), atol=0.001)
continue_estimator_predict = tf.estimator.Estimator(model_fn=mock_t2r_model.model_fn, config=tf.estimator.RunConfig(model_dir=continue_model_dir))
continue_predictions = [prediction['logit'] for prediction in list(continue_estimator_predict.predict(input_fn=mock_input_generator.create_dataset_input_fn(mode=tf.estimator.ModeKeys.EVAL)))]
self.assertTrue(np.allclose(initial_predictions, continue_predictions, atol=0.1))
random_estimator_predict = tf.estimator.Estimator(model_fn=mock_t2r_model.model_fn)
random_predictions = [prediction['logit'] for prediction in list(random_estimator_predict.predict(input_fn=mock_input_generator.create_dataset_input_fn(mode=tf.estimator.ModeKeys.EVAL)))]
self.assertFalse(np.allclose(initial_predictions, random_predictions, atol=0.01)) |
async def async_setup_entry(hass, config_entry, async_add_entities, discovery_info=None):
'Set up the Agent cameras.'
filter_urllib3_logging()
cameras = []
server = hass.data[AGENT_DOMAIN][config_entry.entry_id][CONNECTION]
if (not server.devices):
_LOGGER.warning('Could not fetch cameras from Agent server')
return
for device in server.devices:
if (device.typeID == 2):
camera = AgentCamera(device)
cameras.append(camera)
async_add_entities(cameras)
platform = entity_platform.current_platform.get()
for (service, method) in CAMERA_SERVICES.items():
platform.async_register_entity_service(service, {}, method) | -561,701,980,941,086,000 | Set up the Agent cameras. | homeassistant/components/agent_dvr/camera.py | async_setup_entry | CantankerousBullMoose/core | python | async def async_setup_entry(hass, config_entry, async_add_entities, discovery_info=None):
filter_urllib3_logging()
cameras = []
server = hass.data[AGENT_DOMAIN][config_entry.entry_id][CONNECTION]
if (not server.devices):
_LOGGER.warning('Could not fetch cameras from Agent server')
return
for device in server.devices:
if (device.typeID == 2):
camera = AgentCamera(device)
cameras.append(camera)
async_add_entities(cameras)
platform = entity_platform.current_platform.get()
for (service, method) in CAMERA_SERVICES.items():
platform.async_register_entity_service(service, {}, method) |
def __init__(self, device):
'Initialize as a subclass of MjpegCamera.'
self._servername = device.client.name
self.server_url = device.client._server_url
device_info = {CONF_NAME: device.name, CONF_MJPEG_URL: f'{self.server_url}{device.mjpeg_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}', CONF_STILL_IMAGE_URL: f'{self.server_url}{device.still_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}'}
self.device = device
self._removed = False
self._name = f'{self._servername} {device.name}'
self._unique_id = f'{device._client.unique}_{device.typeID}_{device.id}'
super().__init__(device_info) | 4,172,856,412,794,285,000 | Initialize as a subclass of MjpegCamera. | homeassistant/components/agent_dvr/camera.py | __init__ | CantankerousBullMoose/core | python | def __init__(self, device):
self._servername = device.client.name
self.server_url = device.client._server_url
device_info = {CONF_NAME: device.name, CONF_MJPEG_URL: f'{self.server_url}{device.mjpeg_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}', CONF_STILL_IMAGE_URL: f'{self.server_url}{device.still_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}'}
self.device = device
self._removed = False
self._name = f'{self._servername} {device.name}'
self._unique_id = f'{device._client.unique}_{device.typeID}_{device.id}'
super().__init__(device_info) |
@property
def device_info(self):
'Return the device info for adding the entity to the agent object.'
return {'identifiers': {(AGENT_DOMAIN, self._unique_id)}, 'name': self._name, 'manufacturer': 'Agent', 'model': 'Camera', 'sw_version': self.device.client.version} | 7,824,553,129,315,086,000 | Return the device info for adding the entity to the agent object. | homeassistant/components/agent_dvr/camera.py | device_info | CantankerousBullMoose/core | python | @property
def device_info(self):
return {'identifiers': {(AGENT_DOMAIN, self._unique_id)}, 'name': self._name, 'manufacturer': 'Agent', 'model': 'Camera', 'sw_version': self.device.client.version} |
async def async_update(self):
'Update our state from the Agent API.'
try:
(await self.device.update())
if self._removed:
_LOGGER.debug('%s reacquired', self._name)
self._removed = False
except AgentError:
if self.device.client.is_available:
if (not self._removed):
_LOGGER.error('%s lost', self._name)
self._removed = True | 1,274,266,635,563,659,800 | Update our state from the Agent API. | homeassistant/components/agent_dvr/camera.py | async_update | CantankerousBullMoose/core | python | async def async_update(self):
try:
(await self.device.update())
if self._removed:
_LOGGER.debug('%s reacquired', self._name)
self._removed = False
except AgentError:
if self.device.client.is_available:
if (not self._removed):
_LOGGER.error('%s lost', self._name)
self._removed = True |
@property
def extra_state_attributes(self):
'Return the Agent DVR camera state attributes.'
return {ATTR_ATTRIBUTION: ATTRIBUTION, 'editable': False, 'enabled': self.is_on, 'connected': self.connected, 'detected': self.is_detected, 'alerted': self.is_alerted, 'has_ptz': self.device.has_ptz, 'alerts_enabled': self.device.alerts_active} | -736,943,086,208,970,400 | Return the Agent DVR camera state attributes. | homeassistant/components/agent_dvr/camera.py | extra_state_attributes | CantankerousBullMoose/core | python | @property
def extra_state_attributes(self):
return {ATTR_ATTRIBUTION: ATTRIBUTION, 'editable': False, 'enabled': self.is_on, 'connected': self.connected, 'detected': self.is_detected, 'alerted': self.is_alerted, 'has_ptz': self.device.has_ptz, 'alerts_enabled': self.device.alerts_active} |
@property
def should_poll(self) -> bool:
'Update the state periodically.'
return True | -1,688,106,608,858,049,800 | Update the state periodically. | homeassistant/components/agent_dvr/camera.py | should_poll | CantankerousBullMoose/core | python | @property
def should_poll(self) -> bool:
return True |
@property
def is_recording(self) -> bool:
'Return whether the monitor is recording.'
return self.device.recording | -9,086,336,331,135,627,000 | Return whether the monitor is recording. | homeassistant/components/agent_dvr/camera.py | is_recording | CantankerousBullMoose/core | python | @property
def is_recording(self) -> bool:
return self.device.recording |
@property
def is_alerted(self) -> bool:
'Return whether the monitor has alerted.'
return self.device.alerted | 2,899,730,911,809,477,600 | Return whether the monitor has alerted. | homeassistant/components/agent_dvr/camera.py | is_alerted | CantankerousBullMoose/core | python | @property
def is_alerted(self) -> bool:
return self.device.alerted |
@property
def is_detected(self) -> bool:
    'Return whether the monitor has detected motion.'
    return self.device.detected | -3,371,326,521,136,155,600 | Return whether the monitor has detected motion. | homeassistant/components/agent_dvr/camera.py | is_detected | CantankerousBullMoose/core | python | @property
def is_detected(self) -> bool:
return self.device.detected |
@property
def available(self) -> bool:
'Return True if entity is available.'
return self.device.client.is_available | -6,033,986,792,712,892,000 | Return True if entity is available. | homeassistant/components/agent_dvr/camera.py | available | CantankerousBullMoose/core | python | @property
def available(self) -> bool:
return self.device.client.is_available |
@property
def connected(self) -> bool:
'Return True if entity is connected.'
return self.device.connected | -5,834,607,589,438,554,000 | Return True if entity is connected. | homeassistant/components/agent_dvr/camera.py | connected | CantankerousBullMoose/core | python | @property
def connected(self) -> bool:
return self.device.connected |
@property
def supported_features(self) -> int:
'Return supported features.'
return SUPPORT_ON_OFF | -1,076,124,439,051,380,700 | Return supported features. | homeassistant/components/agent_dvr/camera.py | supported_features | CantankerousBullMoose/core | python | @property
def supported_features(self) -> int:
return SUPPORT_ON_OFF |
@property
def is_on(self) -> bool:
'Return true if on.'
return self.device.online | -5,295,751,153,541,704,000 | Return true if on. | homeassistant/components/agent_dvr/camera.py | is_on | CantankerousBullMoose/core | python | @property
def is_on(self) -> bool:
return self.device.online |
@property
def icon(self):
'Return the icon to use in the frontend, if any.'
if self.is_on:
return 'mdi:camcorder'
return 'mdi:camcorder-off' | 6,399,328,152,966,332,000 | Return the icon to use in the frontend, if any. | homeassistant/components/agent_dvr/camera.py | icon | CantankerousBullMoose/core | python | @property
def icon(self):
if self.is_on:
return 'mdi:camcorder'
return 'mdi:camcorder-off' |
@property
def motion_detection_enabled(self):
'Return the camera motion detection status.'
return self.device.detector_active | 6,028,155,109,194,979,000 | Return the camera motion detection status. | homeassistant/components/agent_dvr/camera.py | motion_detection_enabled | CantankerousBullMoose/core | python | @property
def motion_detection_enabled(self):
return self.device.detector_active |
@property
def unique_id(self) -> str:
'Return a unique identifier for this agent object.'
return self._unique_id | 1,440,107,947,840,357,000 | Return a unique identifier for this agent object. | homeassistant/components/agent_dvr/camera.py | unique_id | CantankerousBullMoose/core | python | @property
def unique_id(self) -> str:
return self._unique_id |
async def async_enable_alerts(self):
'Enable alerts.'
(await self.device.alerts_on()) | 2,796,611,269,641,991,000 | Enable alerts. | homeassistant/components/agent_dvr/camera.py | async_enable_alerts | CantankerousBullMoose/core | python | async def async_enable_alerts(self):
(await self.device.alerts_on()) |
async def async_disable_alerts(self):
'Disable alerts.'
(await self.device.alerts_off()) | -6,570,747,929,846,081,000 | Disable alerts. | homeassistant/components/agent_dvr/camera.py | async_disable_alerts | CantankerousBullMoose/core | python | async def async_disable_alerts(self):
(await self.device.alerts_off()) |
async def async_enable_motion_detection(self):
'Enable motion detection.'
(await self.device.detector_on()) | -8,601,139,264,879,610,000 | Enable motion detection. | homeassistant/components/agent_dvr/camera.py | async_enable_motion_detection | CantankerousBullMoose/core | python | async def async_enable_motion_detection(self):
(await self.device.detector_on()) |
async def async_disable_motion_detection(self):
'Disable motion detection.'
(await self.device.detector_off()) | -7,355,442,744,444,951,000 | Disable motion detection. | homeassistant/components/agent_dvr/camera.py | async_disable_motion_detection | CantankerousBullMoose/core | python | async def async_disable_motion_detection(self):
(await self.device.detector_off()) |
async def async_start_recording(self):
'Start recording.'
(await self.device.record()) | -1,824,808,121,995,718,100 | Start recording. | homeassistant/components/agent_dvr/camera.py | async_start_recording | CantankerousBullMoose/core | python | async def async_start_recording(self):
(await self.device.record()) |
async def async_stop_recording(self):
'Stop recording.'
(await self.device.record_stop()) | 5,086,747,341,827,256,000 | Stop recording. | homeassistant/components/agent_dvr/camera.py | async_stop_recording | CantankerousBullMoose/core | python | async def async_stop_recording(self):
(await self.device.record_stop()) |
async def async_turn_on(self):
'Enable the camera.'
(await self.device.enable()) | -2,295,833,452,988,536,300 | Enable the camera. | homeassistant/components/agent_dvr/camera.py | async_turn_on | CantankerousBullMoose/core | python | async def async_turn_on(self):
(await self.device.enable()) |
async def async_snapshot(self):
'Take a snapshot.'
(await self.device.snapshot()) | 857,259,597,287,051,100 | Take a snapshot. | homeassistant/components/agent_dvr/camera.py | async_snapshot | CantankerousBullMoose/core | python | async def async_snapshot(self):
(await self.device.snapshot()) |
async def async_turn_off(self):
'Disable the camera.'
(await self.device.disable()) | 7,337,812,568,937,745,000 | Disable the camera. | homeassistant/components/agent_dvr/camera.py | async_turn_off | CantankerousBullMoose/core | python | async def async_turn_off(self):
(await self.device.disable()) |
def __init__(__self__, *, resource_group_name: pulumi.Input[str], workspace_name: pulumi.Input[str], location: Optional[pulumi.Input[str]]=None, sku: Optional[pulumi.Input['SkuArgs']]=None, sql_pool_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None):
"\n The set of arguments for constructing a SqlPoolsV3 resource.\n :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n :param pulumi.Input[str] workspace_name: The name of the workspace.\n :param pulumi.Input[str] location: The geo-location where the resource lives\n :param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.\n :param pulumi.Input[str] sql_pool_name: The name of the sql pool.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n "
pulumi.set(__self__, 'resource_group_name', resource_group_name)
pulumi.set(__self__, 'workspace_name', workspace_name)
if (location is not None):
pulumi.set(__self__, 'location', location)
if (sku is not None):
pulumi.set(__self__, 'sku', sku)
if (sql_pool_name is not None):
pulumi.set(__self__, 'sql_pool_name', sql_pool_name)
if (tags is not None):
pulumi.set(__self__, 'tags', tags) | -5,499,257,893,488,918,000 | The set of arguments for constructing a SqlPoolsV3 resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
:param pulumi.Input[str] sql_pool_name: The name of the sql pool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | __init__ | sebtelko/pulumi-azure-native | python | def __init__(__self__, *, resource_group_name: pulumi.Input[str], workspace_name: pulumi.Input[str], location: Optional[pulumi.Input[str]]=None, sku: Optional[pulumi.Input['SkuArgs']]=None, sql_pool_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None):
"\n The set of arguments for constructing a SqlPoolsV3 resource.\n :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n :param pulumi.Input[str] workspace_name: The name of the workspace.\n :param pulumi.Input[str] location: The geo-location where the resource lives\n :param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.\n :param pulumi.Input[str] sql_pool_name: The name of the sql pool.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n "
pulumi.set(__self__, 'resource_group_name', resource_group_name)
pulumi.set(__self__, 'workspace_name', workspace_name)
if (location is not None):
pulumi.set(__self__, 'location', location)
if (sku is not None):
pulumi.set(__self__, 'sku', sku)
if (sql_pool_name is not None):
pulumi.set(__self__, 'sql_pool_name', sql_pool_name)
if (tags is not None):
pulumi.set(__self__, 'tags', tags) |
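A hedged construction example for the args class above; the module path follows the record's file path, and all values are placeholders:

import pulumi_azure_native as azure_native

args = azure_native.synapse.v20200401preview.SqlPoolsV3Args(
    resource_group_name='example-rg',      # required
    workspace_name='example-workspace',    # required
    location='westus2',
    sql_pool_name='example-pool',
    tags={'env': 'dev'},
)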
@property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
'\n The name of the resource group. The name is case insensitive.\n '
return pulumi.get(self, 'resource_group_name') | 9,099,428,823,929,783,000 | The name of the resource group. The name is case insensitive. | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | resource_group_name | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'resource_group_name') |
@property
@pulumi.getter(name='workspaceName')
def workspace_name(self) -> pulumi.Input[str]:
'\n The name of the workspace.\n '
return pulumi.get(self, 'workspace_name') | -6,043,356,629,165,876,000 | The name of the workspace. | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | workspace_name | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='workspaceName')
def workspace_name(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'workspace_name') |
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
'\n The geo-location where the resource lives\n '
return pulumi.get(self, 'location') | -3,407,978,898,650,888,000 | The geo-location where the resource lives | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | location | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'location') |
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
'\n The sql pool SKU. The list of SKUs may vary by region and support offer.\n '
return pulumi.get(self, 'sku') | -9,123,214,329,469,217,000 | The sql pool SKU. The list of SKUs may vary by region and support offer. | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | sku | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
'\n \n '
return pulumi.get(self, 'sku') |
@property
@pulumi.getter(name='sqlPoolName')
def sql_pool_name(self) -> Optional[pulumi.Input[str]]:
'\n The name of the sql pool.\n '
return pulumi.get(self, 'sql_pool_name') | 2,546,227,187,852,153,000 | The name of the sql pool. | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | sql_pool_name | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='sqlPoolName')
def sql_pool_name(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'sql_pool_name') |