body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
---|---|---|---|---|---|---|---|
def _rec_to_ndarr(rec_arr, data_type=float):
'\n Function to transform a numpy record array to a nd array.\n dupe of SimPEG.electromagnetics.natural_source.utils.rec_to_ndarr to avoid circular import\n '
return np.array(recFunc.structured_to_unstructured(recFunc.repack_fields(rec_arr[list(rec_arr.dtype.names)])), dtype=data_type) | -7,502,075,036,549,811,000 | Function to transform a numpy record array to a nd array.
dupe of SimPEG.electromagnetics.natural_source.utils.rec_to_ndarr to avoid circular import | SimPEG/electromagnetics/natural_source/survey.py | _rec_to_ndarr | JKutt/simpeg | python | def _rec_to_ndarr(rec_arr, data_type=float):
'\n Function to transform a numpy record array to a nd array.\n dupe of SimPEG.electromagnetics.natural_source.utils.rec_to_ndarr to avoid circular import\n '
return np.array(recFunc.structured_to_unstructured(recFunc.repack_fields(rec_arr[list(rec_arr.dtype.names)])), dtype=data_type) |
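The helper above just flattens a structured array into a plain 2-D float array. A minimal sketch with made-up data, assuming `recFunc` is `numpy.lib.recfunctions` (as the call names suggest):

```python
import numpy as np
import numpy.lib.recfunctions as recFunc

# A record array with two float fields, standing in for real survey data.
rec = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=[('x', float), ('y', float)])

# Same pipeline as _rec_to_ndarr: select fields, repack, then flatten.
flat = recFunc.structured_to_unstructured(
    recFunc.repack_fields(rec[list(rec.dtype.names)])
)
arr = np.array(flat, dtype=float)
print(arr.shape)  # (2, 2)
print(arr)        # [[1. 2.]
                  #  [3. 4.]]
```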
def toRecArray(self, returnType='RealImag'):
"\n Returns a numpy.recarray for a SimpegNSEM impedance data object.\n\n :param returnType: Switches between returning a rec array where the impedance is split to real and imaginary ('RealImag') or is a complex ('Complex')\n :type returnType: str, optional\n :rtype: numpy.recarray\n :return: Record array with data, with indexed columns\n "
dtRI = [('freq', float), ('x', float), ('y', float), ('z', float), ('zxxr', float), ('zxxi', float), ('zxyr', float), ('zxyi', float), ('zyxr', float), ('zyxi', float), ('zyyr', float), ('zyyi', float), ('tzxr', float), ('tzxi', float), ('tzyr', float), ('tzyi', float)]
dtCP = [('freq', float), ('x', float), ('y', float), ('z', float), ('zxx', complex), ('zxy', complex), ('zyx', complex), ('zyy', complex), ('tzx', complex), ('tzy', complex)]
for src in self.survey.source_list:
locs = src.receiver_list[0].locations
if (locs.shape[1] == 1):
locs = np.hstack((np.array([[0.0, 0.0]]), locs))
elif (locs.shape[1] == 2):
locs = np.hstack((np.array([[0.0]]), locs))
tArrRec = np.concatenate(((src.freq * np.ones((locs.shape[0], 1))), locs, (np.nan * np.ones((locs.shape[0], 12)))), axis=1).view(dtRI)
typeList = [[rx.orientation, rx.component, self[(src, rx)]] for rx in src.receiver_list]
for (nr, (k, c, val)) in enumerate(typeList):
zt_type = ('t' if ('z' in k) else 'z')
key = ((zt_type + k) + c[0])
tArrRec[key] = mkvc(val, 2)
try:
outTemp = recFunc.stack_arrays((outTemp, tArrRec))
except NameError:
outTemp = tArrRec.copy()
if ('RealImag' in returnType):
outArr = outTemp.copy()
elif ('Complex' in returnType):
outArr = np.empty(outTemp.shape, dtype=dtCP)
for comp in ['freq', 'x', 'y', 'z']:
outArr[comp] = outTemp[comp].copy()
for comp in ['zxx', 'zxy', 'zyx', 'zyy', 'tzx', 'tzy']:
outArr[comp] = (outTemp[(comp + 'r')].copy() + (1j * outTemp[(comp + 'i')].copy()))
else:
raise NotImplementedError('{:s} is not implemented, has to be RealImag or Complex.'.format(returnType))
return outArr | 3,530,613,215,818,577,000 | Returns a numpy.recarray for a SimpegNSEM impedance data object.
:param returnType: Switches between returning a rec array where the impedance is split to real and imaginary ('RealImag') or is a complex ('Complex')
:type returnType: str, optional
:rtype: numpy.recarray
:return: Record array with data, with indexed columns | SimPEG/electromagnetics/natural_source/survey.py | toRecArray | JKutt/simpeg | python | def toRecArray(self, returnType='RealImag'):
"\n Returns a numpy.recarray for a SimpegNSEM impedance data object.\n\n :param returnType: Switches between returning a rec array where the impedance is split to real and imaginary ('RealImag') or is a complex ('Complex')\n :type returnType: str, optional\n :rtype: numpy.recarray\n :return: Record array with data, with indexed columns\n "
dtRI = [('freq', float), ('x', float), ('y', float), ('z', float), ('zxxr', float), ('zxxi', float), ('zxyr', float), ('zxyi', float), ('zyxr', float), ('zyxi', float), ('zyyr', float), ('zyyi', float), ('tzxr', float), ('tzxi', float), ('tzyr', float), ('tzyi', float)]
dtCP = [('freq', float), ('x', float), ('y', float), ('z', float), ('zxx', complex), ('zxy', complex), ('zyx', complex), ('zyy', complex), ('tzx', complex), ('tzy', complex)]
for src in self.survey.source_list:
locs = src.receiver_list[0].locations
if (locs.shape[1] == 1):
locs = np.hstack((np.array([[0.0, 0.0]]), locs))
elif (locs.shape[1] == 2):
locs = np.hstack((np.array([[0.0]]), locs))
tArrRec = np.concatenate(((src.freq * np.ones((locs.shape[0], 1))), locs, (np.nan * np.ones((locs.shape[0], 12)))), axis=1).view(dtRI)
typeList = [[rx.orientation, rx.component, self[(src, rx)]] for rx in src.receiver_list]
for (nr, (k, c, val)) in enumerate(typeList):
zt_type = ('t' if ('z' in k) else 'z')
key = ((zt_type + k) + c[0])
tArrRec[key] = mkvc(val, 2)
try:
outTemp = recFunc.stack_arrays((outTemp, tArrRec))
except NameError:
outTemp = tArrRec.copy()
if ('RealImag' in returnType):
outArr = outTemp.copy()
elif ('Complex' in returnType):
outArr = np.empty(outTemp.shape, dtype=dtCP)
for comp in ['freq', 'x', 'y', 'z']:
outArr[comp] = outTemp[comp].copy()
for comp in ['zxx', 'zxy', 'zyx', 'zyy', 'tzx', 'tzy']:
outArr[comp] = (outTemp[(comp + 'r')].copy() + (1j * outTemp[(comp + 'i')].copy()))
else:
raise NotImplementedError('{:s} is not implemented, has to be RealImag or Complex.'.format(returnType))
return outArr |
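The 'Complex' branch above merges paired real/imaginary float fields into complex fields. A toy illustration with made-up values, independent of the SimPEG survey objects:

```python
import numpy as np

# Two rows of a real/imag record array, as toRecArray would build them.
dtRI = [('freq', float), ('zxyr', float), ('zxyi', float)]
dtCP = [('freq', float), ('zxy', complex)]
rows = np.array([(10.0, 1.5, -0.5), (1.0, 0.2, 0.8)], dtype=dtRI)

# Copy the coordinate-like fields, then fuse each r/i pair into one complex field.
out = np.empty(rows.shape, dtype=dtCP)
out['freq'] = rows['freq']
out['zxy'] = rows['zxyr'] + 1j * rows['zxyi']
print(out['zxy'])  # [1.5-0.5j 0.2+0.8j]
```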
@classmethod
def fromRecArray(cls, recArray, srcType='primary'):
"\n Class method that reads in a numpy record array to NSEMdata object.\n\n :param recArray: Record array with the data. Has to have ('freq','x','y','z') columns and some ('zxx','zxy','zyx','zyy','tzx','tzy')\n :type recArray: numpy.recarray\n\n :param srcType: The type of SimPEG.EM.NSEM.SrcNSEM to be used\n :type srcType: str, optional\n\n "
if (srcType == 'primary'):
src = Planewave_xy_1Dprimary
elif (srcType == 'total'):
src = Planewave_xy_1DhomotD
else:
raise NotImplementedError('{:s} is not a valid source type for NSEMdata'.format(srcType))
uniFreq = np.unique(recArray['freq'].copy())
srcList = []
dataList = []
for freq in uniFreq:
rxList = []
dFreq = recArray[(recArray['freq'] == freq)].copy()
rxTypes = [comp for comp in recArray.dtype.names if (((len(comp) == 4) or (len(comp) == 3)) and ('z' in comp))]
for rxType in rxTypes:
notNaNind = (~ np.isnan(dFreq[rxType].copy()))
if np.any(notNaNind):
locs = _rec_to_ndarr(dFreq[['x', 'y', 'z']][notNaNind].copy())
if (dFreq[rxType].dtype.name in 'complex128'):
if ('t' in rxType):
rxList.append(Point3DTipper(locs, rxType[1:3], 'real'))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DTipper(locs, rxType[1:3], 'imag'))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
elif ('z' in rxType):
rxList.append(Point3DImpedance(locs, rxType[1:3], 'real'))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DImpedance(locs, rxType[1:3], 'imag'))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
else:
component = ('real' if ('r' in rxType) else 'imag')
if ('z' in rxType):
rxList.append(Point3DImpedance(locs, rxType[1:3], component))
dataList.append(dFreq[rxType][notNaNind].copy())
if ('t' in rxType):
rxList.append(Point3DTipper(locs, rxType[1:3], component))
dataList.append(dFreq[rxType][notNaNind].copy())
srcList.append(src(rxList, freq))
survey = Survey(srcList)
dataVec = np.hstack(dataList)
return cls(survey, dataVec) | -6,614,493,823,147,336,000 | Class method that reads in a numpy record array to NSEMdata object.
:param recArray: Record array with the data. Has to have ('freq','x','y','z') columns and some ('zxx','zxy','zyx','zyy','tzx','tzy')
:type recArray: numpy.recarray
:param srcType: The type of SimPEG.EM.NSEM.SrcNSEM to be used
:type srcType: str, optional | SimPEG/electromagnetics/natural_source/survey.py | fromRecArray | JKutt/simpeg | python | @classmethod
def fromRecArray(cls, recArray, srcType='primary'):
"\n Class method that reads in a numpy record array to NSEMdata object.\n\n :param recArray: Record array with the data. Has to have ('freq','x','y','z') columns and some ('zxx','zxy','zyx','zyy','tzx','tzy')\n :type recArray: numpy.recarray\n\n :param srcType: The type of SimPEG.EM.NSEM.SrcNSEM to be used\n :type srcType: str, optional\n\n "
if (srcType == 'primary'):
src = Planewave_xy_1Dprimary
elif (srcType == 'total'):
src = Planewave_xy_1DhomotD
else:
raise NotImplementedError('{:s} is not a valid source type for NSEMdata'.format(srcType))
uniFreq = np.unique(recArray['freq'].copy())
srcList = []
dataList = []
for freq in uniFreq:
rxList = []
dFreq = recArray[(recArray['freq'] == freq)].copy()
rxTypes = [comp for comp in recArray.dtype.names if (((len(comp) == 4) or (len(comp) == 3)) and ('z' in comp))]
for rxType in rxTypes:
notNaNind = (~ np.isnan(dFreq[rxType].copy()))
if np.any(notNaNind):
locs = _rec_to_ndarr(dFreq[['x', 'y', 'z']][notNaNind].copy())
if (dFreq[rxType].dtype.name in 'complex128'):
if ('t' in rxType):
rxList.append(Point3DTipper(locs, rxType[1:3], 'real'))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DTipper(locs, rxType[1:3], 'imag'))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
elif ('z' in rxType):
rxList.append(Point3DImpedance(locs, rxType[1:3], 'real'))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DImpedance(locs, rxType[1:3], 'imag'))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
else:
component = ('real' if ('r' in rxType) else 'imag')
if ('z' in rxType):
rxList.append(Point3DImpedance(locs, rxType[1:3], component))
dataList.append(dFreq[rxType][notNaNind].copy())
if ('t' in rxType):
rxList.append(Point3DTipper(locs, rxType[1:3], component))
dataList.append(dFreq[rxType][notNaNind].copy())
srcList.append(src(rxList, freq))
survey = Survey(srcList)
dataVec = np.hstack(dataList)
return cls(survey, dataVec) |
@contextmanager
def chdir(d):
'A context manager that temporarily changes the working directory.\n '
olddir = os.getcwd()
os.chdir(d)
(yield)
os.chdir(olddir) | 1,714,808,242,589,344,500 | A context manager that temporarily changes the working directory. | extra/release.py | chdir | DucNg/beets | python | @contextmanager
def chdir(d):
'\n '
olddir = os.getcwd()
os.chdir(d)
(yield)
os.chdir(olddir) |
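A quick usage sketch for the `chdir` context manager above (the target path is illustrative):

```python
import os

# Work inside /tmp for the duration of the block, then return.
with chdir('/tmp'):
    print(os.getcwd())  # /tmp (or a resolved alias such as /private/tmp on macOS)
print(os.getcwd())      # back to the original directory
```

Note that the original restores the old directory only on a normal exit; wrapping the `yield` in `try`/`finally` would also restore it when the body raises.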
def bump_version(version):
'Update the version number in setup.py, docs config, changelog,\n and root module.\n '
version_parts = [int(p) for p in version.split('.')]
assert (len(version_parts) == 3), 'invalid version number'
minor = '{}.{}'.format(*version_parts)
major = '{}'.format(*version_parts)
for (filename, locations) in VERSION_LOCS:
out_lines = []
with open(filename) as f:
found = False
for line in f:
for (pattern, template) in locations:
match = re.match(pattern, line)
if match:
old_version = match.group(1)
old_parts = [int(p) for p in old_version.split('.')]
assert (version_parts > old_parts), 'version must be newer than {}'.format(old_version)
out_lines.append((template.format(version=version, major=major, minor=minor) + '\n'))
found = True
break
else:
out_lines.append(line)
if (not found):
print(f'No pattern found in {filename}')
with open(filename, 'w') as f:
f.write(''.join(out_lines))
header_line = f'{version} (in development)'
header = (((('\n\n' + header_line) + '\n') + ('-' * len(header_line))) + '\n\n')
header += 'Changelog goes here!\n'
with open(CHANGELOG) as f:
contents = f.read()
location = contents.find('\n\n')
contents = ((contents[:location] + header) + contents[location:])
with open(CHANGELOG, 'w') as f:
f.write(contents) | 1,878,278,604,259,459,800 | Update the version number in setup.py, docs config, changelog,
and root module. | extra/release.py | bump_version | DucNg/beets | python | def bump_version(version):
'Update the version number in setup.py, docs config, changelog,\n and root module.\n '
version_parts = [int(p) for p in version.split('.')]
assert (len(version_parts) == 3), 'invalid version number'
minor = '{}.{}'.format(*version_parts)
major = '{}'.format(*version_parts)
for (filename, locations) in VERSION_LOCS:
out_lines = []
with open(filename) as f:
found = False
for line in f:
for (pattern, template) in locations:
match = re.match(pattern, line)
if match:
old_version = match.group(1)
old_parts = [int(p) for p in old_version.split('.')]
assert (version_parts > old_parts), 'version must be newer than {}'.format(old_version)
out_lines.append((template.format(version=version, major=major, minor=minor) + '\n'))
found = True
break
else:
out_lines.append(line)
if (not found):
print(f'No pattern found in {filename}')
with open(filename, 'w') as f:
f.write(''.join(out_lines))
header_line = f'{version} (in development)'
header = (((('\n\n' + header_line) + '\n') + ('-' * len(header_line))) + '\n\n')
header += 'Changelog goes here!\n'
with open(CHANGELOG) as f:
contents = f.read()
location = contents.find('\n\n')
contents = ((contents[:location] + header) + contents[location:])
with open(CHANGELOG, 'w') as f:
f.write(contents) |
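`VERSION_LOCS` is not shown in this excerpt; a plausible, purely hypothetical `(pattern, template)` pair and the rewrite step it drives would look like this:

```python
import re

# Hypothetical entry for a setup.py-style version line.
pattern = r"version='([0-9\.]+)',"
template = "    version='{version}',"

line = "    version='1.6.0',"
match = re.match(pattern, line.strip())
if match:
    old_version = match.group(1)  # '1.6.0'
    # str.format ignores the unused major/minor keywords, as in bump_version.
    print(template.format(version='1.6.1', major='1', minor='1.6'))
    #     version='1.6.1',
```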
@release.command()
@click.argument('version')
def bump(version):
'Bump the version number.\n '
bump_version(version) | -6,585,817,667,597,795,000 | Bump the version number. | extra/release.py | bump | DucNg/beets | python | @release.command()
@click.argument('version')
def bump(version):
'\n '
bump_version(version) |
def get_latest_changelog():
'Extract the first section of the changelog.\n '
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match('^--+$', line.strip()):
if started:
del lines[(- 1)]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip() | -2,016,547,498,757,757,700 | Extract the first section of the changelog. | extra/release.py | get_latest_changelog | DucNg/beets | python | def get_latest_changelog():
'\n '
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match('^--+$', line.strip()):
if started:
del lines[(- 1)]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip() |
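The section extraction above can be checked against a tiny in-memory changelog (sample text is made up):

```python
import re

sample = """1.6.1 (in development)
----------------------

Changelog goes here!

1.6.0 (November 21, 2021)
-------------------------

Old entry.
"""

started, lines = False, []
for line in sample.splitlines(True):
    if re.match('^--+$', line.strip()):
        if started:
            del lines[-1]  # drop the '1.6.0 ...' header collected just before
            break
        started = True
    elif started:
        lines.append(line)
print(''.join(lines).strip())  # Changelog goes here!
```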
def rst2md(text):
'Use Pandoc to convert text from ReST to Markdown.\n '
pandoc = subprocess.Popen(['pandoc', '--from=rst', '--to=markdown', '--wrap=none'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, _) = pandoc.communicate(text.encode('utf-8'))
md = stdout.decode('utf-8').strip()
return re.sub('^- ', '- ', md, flags=re.M) | -1,097,655,531,828,343,000 | Use Pandoc to convert text from ReST to Markdown. | extra/release.py | rst2md | DucNg/beets | python | def rst2md(text):
'\n '
pandoc = subprocess.Popen(['pandoc', '--from=rst', '--to=markdown', '--wrap=none'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, _) = pandoc.communicate(text.encode('utf-8'))
md = stdout.decode('utf-8').strip()
return re.sub('^- ', '- ', md, flags=re.M) |
def changelog_as_markdown():
'Get the latest changelog entry as hacked up Markdown.\n '
rst = get_latest_changelog()
rst = re.sub(':doc:`/plugins/(\\w+)`', '``\\1``', rst)
rst = re.sub(':ref:`([^<]+)(<[^>]+>)`', '\\1', rst)
rst = re.sub('(\\s)`([^`]+)`([^_])', '\\1``\\2``\\3', rst)
rst = re.sub(':ref:`(\\w+)-cmd`', '``\\1``', rst)
rst = re.sub(':bug:`(\\d+)`', '#\\1', rst)
rst = re.sub(':user:`(\\w+)`', '@\\1', rst)
md = rst2md(rst)
md = re.sub('\\\\#(\\d+)\\b', '#\\1', md)
return md | -8,174,653,492,084,834,000 | Get the latest changelog entry as hacked up Markdown. | extra/release.py | changelog_as_markdown | DucNg/beets | python | def changelog_as_markdown():
'\n '
rst = get_latest_changelog()
rst = re.sub(':doc:`/plugins/(\\w+)`', '``\\1``', rst)
rst = re.sub(':ref:`([^<]+)(<[^>]+>)`', '\\1', rst)
rst = re.sub('(\\s)`([^`]+)`([^_])', '\\1``\\2``\\3', rst)
rst = re.sub(':ref:`(\\w+)-cmd`', '``\\1``', rst)
rst = re.sub(':bug:`(\\d+)`', '#\\1', rst)
rst = re.sub(':user:`(\\w+)`', '@\\1', rst)
md = rst2md(rst)
md = re.sub('\\\\#(\\d+)\\b', '#\\1', md)
return md |
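Two of the reference rewrites above, demonstrated in isolation on a made-up changelog line:

```python
import re

rst = 'Fixed crash on startup. :bug:`1234` (thanks :user:`alice`)'
rst = re.sub(r':bug:`(\d+)`', r'#\1', rst)
rst = re.sub(r':user:`(\w+)`', r'@\1', rst)
print(rst)  # Fixed crash on startup. #1234 (thanks @alice)
```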
@release.command()
def changelog():
"Get the most recent version's changelog as Markdown.\n "
print(changelog_as_markdown()) | 7,300,083,879,938,557,000 | Get the most recent version's changelog as Markdown. | extra/release.py | changelog | DucNg/beets | python | @release.command()
def changelog():
"\n "
print(changelog_as_markdown()) |
def get_version(index=0):
'Read the current version from the changelog.\n '
with open(CHANGELOG) as f:
cur_index = 0
for line in f:
match = re.search('^\\d+\\.\\d+\\.\\d+', line)
if match:
if (cur_index == index):
return match.group(0)
else:
cur_index += 1 | 2,229,043,007,905,601,000 | Read the current version from the changelog. | extra/release.py | get_version | DucNg/beets | python | def get_version(index=0):
'\n '
with open(CHANGELOG) as f:
cur_index = 0
for line in f:
match = re.search('^\\d+\\.\\d+\\.\\d+', line)
if match:
if (cur_index == index):
return match.group(0)
else:
cur_index += 1 |
@release.command()
def version():
'Display the current version.\n '
print(get_version()) | 4,739,928,957,776,093,000 | Display the current version. | extra/release.py | version | DucNg/beets | python | @release.command()
def version():
'\n '
print(get_version()) |
@release.command()
def datestamp():
"Enter today's date as the release date in the changelog.\n "
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if (marker in line):
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
lines.append((('-' * underline_length) + '\n'))
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line) | -7,742,174,435,753,201,000 | Enter today's date as the release date in the changelog. | extra/release.py | datestamp | DucNg/beets | python | @release.command()
def datestamp():
"\n "
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if (marker in line):
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
lines.append((('-' * underline_length) + '\n'))
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line) |
@release.command()
def prep():
'Run all steps to prepare a release.\n\n - Tag the commit.\n - Build the sdist package.\n - Generate the Markdown changelog to ``changelog.md``.\n - Bump the version number to the next version.\n '
cur_version = get_version()
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[(- 1)] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version) | 8,656,651,602,921,603,000 | Run all steps to prepare a release.
- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next version. | extra/release.py | prep | DucNg/beets | python | @release.command()
def prep():
'Run all steps to prepare a release.\n\n - Tag the commit.\n - Build the sdist package.\n - Generate the Markdown changelog to ``changelog.md``.\n - Bump the version number to the next version.\n '
cur_version = get_version()
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[(- 1)] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version) |
@release.command()
def publish():
'Unleash a release unto the world.\n\n - Push the tag to GitHub.\n - Upload to PyPI.\n '
version = get_version(1)
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['twine', 'upload', path]) | -8,958,813,535,979,738,000 | Unleash a release unto the world.
- Push the tag to GitHub.
- Upload to PyPI. | extra/release.py | publish | DucNg/beets | python | @release.command()
def publish():
'Unleash a release unto the world.\n\n - Push the tag to GitHub.\n - Upload to PyPI.\n '
version = get_version(1)
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['twine', 'upload', path]) |
@release.command()
def ghrelease():
'Create a GitHub release using the `github-release` command-line\n tool.\n\n Reads the changelog to upload from `changelog.md`. Uploads the\n tarball from the `dist` directory.\n '
version = get_version(1)
tag = ('v' + version)
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
subprocess.check_call(['github-release', 'release', '-u', GITHUB_USER, '-r', GITHUB_REPO, '--tag', tag, '--name', f'{GITHUB_REPO} {version}', '--description', cl_md])
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['github-release', 'upload', '-u', GITHUB_USER, '-r', GITHUB_REPO, '--tag', tag, '--name', os.path.basename(tarball), '--file', tarball]) | 2,627,795,155,991,059,500 | Create a GitHub release using the `github-release` command-line
tool.
Reads the changelog to upload from `changelog.md`. Uploads the
tarball from the `dist` directory. | extra/release.py | ghrelease | DucNg/beets | python | @release.command()
def ghrelease():
'Create a GitHub release using the `github-release` command-line\n tool.\n\n Reads the changelog to upload from `changelog.md`. Uploads the\n tarball from the `dist` directory.\n '
version = get_version(1)
tag = ('v' + version)
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
subprocess.check_call(['github-release', 'release', '-u', GITHUB_USER, '-r', GITHUB_REPO, '--tag', tag, '--name', f'{GITHUB_REPO} {version}', '--description', cl_md])
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['github-release', 'upload', '-u', GITHUB_USER, '-r', GITHUB_REPO, '--tag', tag, '--name', os.path.basename(tarball), '--file', tarball]) |
def getEtiqueta(linea: str) -> str:
'Gets the name of the capture\n\n Args:\n linea (str): Line in which to look for the label\n\n Returns:\n str: Returns the name of the label\n '
pattern = '\\s+([a-z]{1,5})\\s+([a-z]{1,24})'
busqueda = re.search(pattern, linea, re.IGNORECASE)
etiqueta = busqueda.group(2)
return etiqueta | -1,470,548,433,536,994,600 | Gets the name of the capture
Args:
linea (str): Line in which to look for the label
Returns:
str: Returns the name of the label | Precompilar/relativo.py | getEtiqueta | EzioFenix/Compilador-M68HC11 | python | def getEtiqueta(linea: str) -> str:
'Obtiene el nombre de la captura\n\n Args:\n linea (str): Linea donde se va a buscar la etiqueta\n\n Returns:\n str: Regresa el nombre de la etiqueta\n '
pattern = '\\s+([a-z]{1,5})\\s+([a-z]{1,24})'
busqueda = re.search(pattern, linea, re.IGNORECASE)
etiqueta = busqueda.group(2)
return etiqueta |
def calcularEtiqueta(sustraendo: str, minuendo: str) -> str:
"Subtracts the difference between two PCs in hexadecimal\n sustraendo - minuendo\n\n - If\n - Sustraendo - minuendo\n - In case of error returns 'e10', operand too large\n\n Args:\n sustraendo (str): Example '0x7'\n minuendo (str): Example '0x1'\n\n Returns:\n str: Example '0x06'\n "
print(sustraendo)
print(minuendo)
sustraendo = int(sustraendo, 16)
minuendo = int(minuendo, 16)
resultado: int = (sustraendo - minuendo)
print(resultado)
if ((resultado < (- 127)) or (128 < resultado)):
return 'e10'
elif (resultado < 0):
return convertirA2Hex(resultado)
else:
return hex(resultado) | -1,091,928,827,621,050,600 | Subtracts the difference between two PCs in hexadecimal
sustraendo - minuendo
- If
- Sustraendo - minuendo
- In case of error returns 'e10', operand too large
Args:
sustraendo (str): Example '0x7'
minuendo (str): Example '0x1'
Returns:
str: Example '0x06' | Precompilar/relativo.py | calcularEtiqueta | EzioFenix/Compilador-M68HC11 | python | def calcularEtiqueta(sustraendo: str, minuendo: str) -> str:
"Subtracts the difference between two PCs in hexadecimal\n sustraendo - minuendo\n\n - If\n - Sustraendo - minuendo\n - In case of error returns 'e10', operand too large\n\n Args:\n sustraendo (str): Example '0x7'\n minuendo (str): Example '0x1'\n\n Returns:\n str: Example '0x06'\n "
print(sustraendo)
print(minuendo)
sustraendo = int(sustraendo, 16)
minuendo = int(minuendo, 16)
resultado: int = (sustraendo - minuendo)
print(resultado)
if ((resultado < (- 127)) or (128 < resultado)):
return 'e10'
elif (resultado < 0):
return convertirA2Hex(resultado)
else:
return hex(resultado) |
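Checking the branch-offset arithmetic by hand, assuming the helpers from this file (`calcularEtiqueta`, `convertirA2Hex`, `bindigits`) are all in scope; the function also emits its internal `print()` debug lines:

```python
print(calcularEtiqueta('0x7', '0x1'))    # 0x6  : forward branch, 7 - 1 = 6
print(calcularEtiqueta('0x1', '0x7'))    # 0xfa : 1 - 7 = -6 in two's complement
print(calcularEtiqueta('0x200', '0x1'))  # e10  : 511 exceeds the signed 8-bit range
```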
def bindigits(n: int, bits: int) -> str:
"Converts a number to binary, as two's complement if negative, normal if positive\n\n Args:\n n (int): E.g. 7\n bits (int): E.g. 3\n\n Returns:\n str: E.g. '111'\n "
s = bin((n & int(('1' * bits), 2)))[2:]
return ('{0:0>%s}' % bits).format(s) | -555,204,515,090,442,560 | Converts a number to binary, as two's complement if negative, normal if positive
Args:
n (int): E.g. 7
bits (int): E.g. 3
Returns:
str: E.g. '111' | Precompilar/relativo.py | bindigits | EzioFenix/Compilador-M68HC11 | python | def bindigits(n: int, bits: int) -> str:
"Converts a number to binary, as two's complement if negative, normal if positive\n\n Args:\n n (int): E.g. 7\n bits (int): E.g. 3\n\n Returns:\n str: E.g. '111'\n "
s = bin((n & int(('1' * bits), 2)))[2:]
return ('{0:0>%s}' % bits).format(s) |
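The mask `n & int('1' * bits, 2)` equals `n & (2**bits - 1)`: it keeps the low `bits` bits, which for a negative `n` is exactly its two's-complement encoding. A quick check, assuming `bindigits` from above is in scope:

```python
print(bindigits(7, 3))   # 111
print(bindigits(-6, 8))  # 11111010
print(bin(-6 & 0xFF))    # 0b11111010 (same value via a literal mask)
```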
def convertirA2Hex(numero: int) -> str:
'Converts a decimal number to hexadecimal\n\n - If the number is negative it is converted to A2 (twos complement)\n\n Args:\n numero (int): Decimal number to convert, e.g. 7\n\n Returns:\n str: E.g. 0x7\n '
cuantosBits = ((len(hex(numero)) - 2) * 4)
binario = bindigits(numero, cuantosBits)
return hex(int(binario, 2)) | -6,387,372,555,459,491,000 | Converts a decimal number to hexadecimal
- If the number is negative it is converted to A2 (two's complement)
Args:
numero (int): Decimal number to convert, e.g. 7
Returns:
str: E.g. 0x7 | Precompilar/relativo.py | convertirA2Hex | EzioFenix/Compilador-M68HC11 | python | def convertirA2Hex(numero: int) -> str:
'Converts a decimal number to hexadecimal\n\n - If the number is negative it is converted to A2 (twos complement)\n\n Args:\n numero (int): Decimal number to convert, e.g. 7\n\n Returns:\n str: E.g. 0x7\n '
cuantosBits = ((len(hex(numero)) - 2) * 4)
binario = bindigits(numero, cuantosBits)
return hex(int(binario, 2)) |
def tarako_passed(review):
'Add the tarako tag to the app.'
tag = Tag(tag_text='tarako')
tag.save_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review) | 1,414,064,931,217,316,000 | Add the tarako tag to the app. | mkt/reviewers/models.py | tarako_passed | ngokevin/zamboni | python | def tarako_passed(review):
tag = Tag(tag_text='tarako')
tag.save_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review) |
def tarako_failed(review):
'Remove the tarako tag from the app.'
tag = Tag(tag_text='tarako')
tag.remove_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review) | -5,799,101,042,945,683,000 | Remove the tarako tag from the app. | mkt/reviewers/models.py | tarako_failed | ngokevin/zamboni | python | def tarako_failed(review):
tag = Tag(tag_text='tarako')
tag.remove_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review) |
@classmethod
def get_event(cls, addon, status, **kwargs):
"Return the review event type constant.\n\n This is determined by the app type and the queue the addon is\n currently in (which is determined from the status).\n\n Note: We're not using addon.status because this is called after the\n status has been updated by the reviewer action.\n\n "
if addon.is_packaged:
if (status in amo.WEBAPPS_APPROVED_STATUSES):
return amo.REVIEWED_WEBAPP_UPDATE
else:
return amo.REVIEWED_WEBAPP_PACKAGED
else:
in_rereview = kwargs.pop('in_rereview', False)
if ((status in amo.WEBAPPS_APPROVED_STATUSES) and in_rereview):
return amo.REVIEWED_WEBAPP_REREVIEW
else:
return amo.REVIEWED_WEBAPP_HOSTED | 7,765,533,970,006,714,000 | Return the review event type constant.
This is determined by the app type and the queue the addon is
currently in (which is determined from the status).
Note: We're not using addon.status because this is called after the
status has been updated by the reviewer action. | mkt/reviewers/models.py | get_event | ngokevin/zamboni | python | @classmethod
def get_event(cls, addon, status, **kwargs):
"Return the review event type constant.\n\n This is determined by the app type and the queue the addon is\n currently in (which is determined from the status).\n\n Note: We're not using addon.status because this is called after the\n status has been updated by the reviewer action.\n\n "
if addon.is_packaged:
if (status in amo.WEBAPPS_APPROVED_STATUSES):
return amo.REVIEWED_WEBAPP_UPDATE
else:
return amo.REVIEWED_WEBAPP_PACKAGED
else:
in_rereview = kwargs.pop('in_rereview', False)
if ((status in amo.WEBAPPS_APPROVED_STATUSES) and in_rereview):
return amo.REVIEWED_WEBAPP_REREVIEW
else:
return amo.REVIEWED_WEBAPP_HOSTED |
@classmethod
def award_points(cls, user, addon, status, **kwargs):
'Awards points to user based on an event and the queue.\n\n `event` is one of the `REVIEWED_` keys in constants.\n `status` is one of the `STATUS_` keys in constants.\n\n '
event = cls.get_event(addon, status, **kwargs)
score = amo.REVIEWED_SCORES.get(event)
if score:
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info((u'Awarding %s points to user %s for "%s" for addon %s' % (score, user, amo.REVIEWED_CHOICES[event], addon.id)).encode('utf-8'))
return score | -170,779,754,332,337,860 | Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants. | mkt/reviewers/models.py | award_points | ngokevin/zamboni | python | @classmethod
def award_points(cls, user, addon, status, **kwargs):
'Awards points to user based on an event and the queue.\n\n `event` is one of the `REVIEWED_` keys in constants.\n `status` is one of the `STATUS_` keys in constants.\n\n '
event = cls.get_event(addon, status, **kwargs)
score = amo.REVIEWED_SCORES.get(event)
if score:
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info((u'Awarding %s points to user %s for "%s" for addon %s' % (score, user, amo.REVIEWED_CHOICES[event], addon.id)).encode('utf-8'))
return score |
@classmethod
def award_moderation_points(cls, user, addon, review_id):
'Awards points to user based on moderated review.'
event = amo.REVIEWED_APP_REVIEW
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info((u'Awarding %s points to user %s for "%s" for review %s' % (score, user, amo.REVIEWED_CHOICES[event], review_id))) | -6,911,730,646,535,177,000 | Awards points to user based on moderated review. | mkt/reviewers/models.py | award_moderation_points | ngokevin/zamboni | python | @classmethod
def award_moderation_points(cls, user, addon, review_id):
event = amo.REVIEWED_APP_REVIEW
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info((u'Awarding %s points to user %s for "%s" for review %s' % (score, user, amo.REVIEWED_CHOICES[event], review_id))) |
@classmethod
def get_total(cls, user):
'Returns total points by user.'
key = cls.get_key(('get_total:%s' % user.id))
val = cache.get(key)
if (val is not None):
return val
val = ReviewerScore.objects.no_cache().filter(user=user).aggregate(total=Sum('score')).values()[0]
if (val is None):
val = 0
cache.set(key, val, None)
return val | -763,553,648,910,366,800 | Returns total points by user. | mkt/reviewers/models.py | get_total | ngokevin/zamboni | python | @classmethod
def get_total(cls, user):
key = cls.get_key(('get_total:%s' % user.id))
val = cache.get(key)
if (val is not None):
return val
val = ReviewerScore.objects.no_cache().filter(user=user).aggregate(total=Sum('score')).values()[0]
if (val is None):
val = 0
cache.set(key, val, None)
return val |
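The cache-aside pattern used by `get_total` above, reduced to its skeleton with a plain dict standing in for Django's cache backend (names are illustrative):

```python
_cache = {}

def get_total_cached(user_id, compute):
    key = 'get_total:%s' % user_id
    val = _cache.get(key)
    if val is not None:
        return val             # cache hit: no database query
    val = compute(user_id) or 0
    _cache[key] = val          # cache miss: compute once, store indefinitely
    return val

print(get_total_cached(1, lambda uid: 42))  # 42, computed
print(get_total_cached(1, lambda uid: 99))  # 42, served from the cache
```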
@classmethod
def get_recent(cls, user, limit=5):
'Returns most recent ReviewerScore records.'
key = cls.get_key(('get_recent:%s' % user.id))
val = cache.get(key)
if (val is not None):
return val
val = ReviewerScore.objects.no_cache().filter(user=user)
val = list(val[:limit])
cache.set(key, val, None)
return val | -5,398,352,852,748,852,000 | Returns most recent ReviewerScore records. | mkt/reviewers/models.py | get_recent | ngokevin/zamboni | python | @classmethod
def get_recent(cls, user, limit=5):
key = cls.get_key(('get_recent:%s' % user.id))
val = cache.get(key)
if (val is not None):
return val
val = ReviewerScore.objects.no_cache().filter(user=user)
val = list(val[:limit])
cache.set(key, val, None)
return val |
@classmethod
def get_performance(cls, user):
'Returns sum of reviewer points.'
key = cls.get_key(('get_performance:%s' % user.id))
val = cache.get(key)
if (val is not None):
return val
sql = '\n SELECT `reviewer_scores`.*,\n SUM(`reviewer_scores`.`score`) AS `total`\n FROM `reviewer_scores`\n LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)\n WHERE `reviewer_scores`.`user_id` = %s\n ORDER BY `total` DESC\n '
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id]))
cache.set(key, val, None)
return val | 7,279,862,741,735,119,000 | Returns sum of reviewer points. | mkt/reviewers/models.py | get_performance | ngokevin/zamboni | python | @classmethod
def get_performance(cls, user):
key = cls.get_key(('get_performance:%s' % user.id))
val = cache.get(key)
if (val is not None):
return val
sql = '\n SELECT `reviewer_scores`.*,\n SUM(`reviewer_scores`.`score`) AS `total`\n FROM `reviewer_scores`\n LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)\n WHERE `reviewer_scores`.`user_id` = %s\n ORDER BY `total` DESC\n '
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id]))
cache.set(key, val, None)
return val |
@classmethod
def get_performance_since(cls, user, since):
'\n Returns sum of reviewer points since the given datetime.\n '
key = cls.get_key(('get_performance:%s:%s' % (user.id, since.isoformat())))
val = cache.get(key)
if (val is not None):
return val
sql = '\n SELECT `reviewer_scores`.*,\n SUM(`reviewer_scores`.`score`) AS `total`\n FROM `reviewer_scores`\n LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)\n WHERE `reviewer_scores`.`user_id` = %s AND\n `reviewer_scores`.`created` >= %s\n ORDER BY `total` DESC\n '
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
cache.set(key, val, 3600)
return val | 8,008,705,010,501,285,000 | Returns sum of reviewer points since the given datetime. | mkt/reviewers/models.py | get_performance_since | ngokevin/zamboni | python | @classmethod
def get_performance_since(cls, user, since):
'\n \n '
key = cls.get_key(('get_performance:%s:%s' % (user.id, since.isoformat())))
val = cache.get(key)
if (val is not None):
return val
sql = '\n SELECT `reviewer_scores`.*,\n SUM(`reviewer_scores`.`score`) AS `total`\n FROM `reviewer_scores`\n LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)\n WHERE `reviewer_scores`.`user_id` = %s AND\n `reviewer_scores`.`created` >= %s\n ORDER BY `total` DESC\n '
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
cache.set(key, val, 3600)
return val |
@classmethod
def _leaderboard_query(cls, since=None, types=None):
'\n Returns common SQL to leaderboard calls.\n '
query = cls.objects.values_list('user__id', 'user__display_name').annotate(total=Sum('score')).exclude(user__groups__name__in=('No Reviewer Incentives', 'Staff', 'Admins')).order_by('-total')
if (since is not None):
query = query.filter(created__gte=since)
if (types is not None):
query = query.filter(note_key__in=types)
return query | -7,027,020,255,559,473,000 | Returns common SQL to leaderboard calls. | mkt/reviewers/models.py | _leaderboard_query | ngokevin/zamboni | python | @classmethod
def _leaderboard_query(cls, since=None, types=None):
'\n \n '
query = cls.objects.values_list('user__id', 'user__display_name').annotate(total=Sum('score')).exclude(user__groups__name__in=('No Reviewer Incentives', 'Staff', 'Admins')).order_by('-total')
if (since is not None):
query = query.filter(created__gte=since)
if (types is not None):
query = query.filter(note_key__in=types)
return query |
@classmethod
def get_leaderboards(cls, user, days=7, types=None):
"Returns leaderboards with ranking for the past given days.\n\n This will return a dict of 3 items::\n\n {'leader_top': [...],\n 'leader_near: [...],\n 'user_rank': (int)}\n\n If the user is not in the leaderboard, or if the user is in the top 5,\n 'leader_near' will be an empty list and 'leader_top' will contain 5\n elements instead of the normal 3.\n\n "
key = cls.get_key(('get_leaderboards:%s' % user.id))
val = cache.get(key)
if (val is not None):
return val
week_ago = (datetime.date.today() - datetime.timedelta(days=days))
leader_top = []
leader_near = []
query = cls._leaderboard_query(since=week_ago, types=types)
scores = []
user_rank = 0
in_leaderboard = False
for (rank, row) in enumerate(query, 1):
(user_id, name, total) = row
scores.append({'user_id': user_id, 'name': name, 'rank': rank, 'total': int(total)})
if (user_id == user.id):
user_rank = rank
in_leaderboard = True
if (not in_leaderboard):
leader_top = scores[:5]
elif (user_rank <= 5):
leader_top = scores[:5]
else:
leader_top = scores[:3]
leader_near = [scores[(user_rank - 2)], scores[(user_rank - 1)]]
try:
leader_near.append(scores[user_rank])
except IndexError:
pass
val = {'leader_top': leader_top, 'leader_near': leader_near, 'user_rank': user_rank}
cache.set(key, val, None)
return val | -8,640,058,109,515,062,000 | Returns leaderboards with ranking for the past given days.
This will return a dict of 3 items::
{'leader_top': [...],
'leader_near: [...],
'user_rank': (int)}
If the user is not in the leaderboard, or if the user is in the top 5,
'leader_near' will be an empty list and 'leader_top' will contain 5
elements instead of the normal 3. | mkt/reviewers/models.py | get_leaderboards | ngokevin/zamboni | python | @classmethod
def get_leaderboards(cls, user, days=7, types=None):
"Returns leaderboards with ranking for the past given days.\n\n This will return a dict of 3 items::\n\n {'leader_top': [...],\n 'leader_near: [...],\n 'user_rank': (int)}\n\n If the user is not in the leaderboard, or if the user is in the top 5,\n 'leader_near' will be an empty list and 'leader_top' will contain 5\n elements instead of the normal 3.\n\n "
key = cls.get_key(('get_leaderboards:%s' % user.id))
val = cache.get(key)
if (val is not None):
return val
week_ago = (datetime.date.today() - datetime.timedelta(days=days))
leader_top = []
leader_near = []
query = cls._leaderboard_query(since=week_ago, types=types)
scores = []
user_rank = 0
in_leaderboard = False
for (rank, row) in enumerate(query, 1):
(user_id, name, total) = row
scores.append({'user_id': user_id, 'name': name, 'rank': rank, 'total': int(total)})
if (user_id == user.id):
user_rank = rank
in_leaderboard = True
if (not in_leaderboard):
leader_top = scores[:5]
elif (user_rank <= 5):
leader_top = scores[:5]
else:
leader_top = scores[:3]
leader_near = [scores[(user_rank - 2)], scores[(user_rank - 1)]]
try:
leader_near.append(scores[user_rank])
except IndexError:
pass
val = {'leader_top': leader_top, 'leader_near': leader_near, 'user_rank': user_rank}
cache.set(key, val, None)
return val |
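The ranking-window slicing above, isolated with synthetic scores:

```python
# Ten fake rows, already ordered by rank.
scores = [{'rank': r, 'total': 100 - 10 * r} for r in range(1, 11)]
user_rank = 7  # user is below 5th place

leader_top = scores[:3]
leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
try:
    leader_near.append(scores[user_rank])
except IndexError:
    pass  # user is last; nobody ranks below them

print([s['rank'] for s in leader_top])   # [1, 2, 3]
print([s['rank'] for s in leader_near])  # [6, 7, 8]
```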
@classmethod
def all_users_by_score(cls):
'\n Returns reviewers ordered by highest total points first.\n '
query = cls._leaderboard_query()
scores = []
for row in query:
(user_id, name, total) = row
user_level = (len(amo.REVIEWED_LEVELS) - 1)
for (i, level) in enumerate(amo.REVIEWED_LEVELS):
if (total < level['points']):
user_level = (i - 1)
break
if (user_level < 0):
level = ''
else:
level = amo.REVIEWED_LEVELS[user_level]['name']
scores.append({'user_id': user_id, 'name': name, 'total': int(total), 'level': level})
prev = None
for score in reversed(scores):
if (score['level'] == prev):
score['level'] = ''
else:
prev = score['level']
return scores | -463,350,881,850,450,900 | Returns reviewers ordered by highest total points first. | mkt/reviewers/models.py | all_users_by_score | ngokevin/zamboni | python | @classmethod
def all_users_by_score(cls):
'\n \n '
query = cls._leaderboard_query()
scores = []
for row in query:
(user_id, name, total) = row
user_level = (len(amo.REVIEWED_LEVELS) - 1)
for (i, level) in enumerate(amo.REVIEWED_LEVELS):
if (total < level['points']):
user_level = (i - 1)
break
if (user_level < 0):
level = ''
else:
level = amo.REVIEWED_LEVELS[user_level]['name']
scores.append({'user_id': user_id, 'name': name, 'total': int(total), 'level': level})
prev = None
for score in reversed(scores):
if (score['level'] == prev):
score['level'] = ''
else:
prev = score['level']
return scores |
def execute_post_review_task(self):
'\n Call the correct post-review function for the queue.\n '
if (self.passed is None):
raise ValueError('cannot execute post-review task when unreviewed')
elif self.passed:
tarako_passed(self)
action = amo.LOG.PASS_ADDITIONAL_REVIEW
else:
tarako_failed(self)
action = amo.LOG.FAIL_ADDITIONAL_REVIEW
self.log_reviewer_action(self.app, self.reviewer, (self.comment or ''), action, queue=self.queue) | -707,987,219,861,457,900 | Call the correct post-review function for the queue. | mkt/reviewers/models.py | execute_post_review_task | ngokevin/zamboni | python | def execute_post_review_task(self):
'\n \n '
if (self.passed is None):
raise ValueError('cannot execute post-review task when unreviewed')
elif self.passed:
tarako_passed(self)
action = amo.LOG.PASS_ADDITIONAL_REVIEW
else:
tarako_failed(self)
action = amo.LOG.FAIL_ADDITIONAL_REVIEW
self.log_reviewer_action(self.app, self.reviewer, (self.comment or ), action, queue=self.queue) |
def test_lookup_cache(self):
'\n Make sure that the content type cache (see ContentTypeManager)\n works correctly. Lookups for a particular content type -- by model or\n by ID -- should hit the database only on the first lookup.\n '
ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ct = ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.get_for_id(ct.id)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.clear_cache()
ContentType.objects.get_for_model(ContentType)
self.assertEqual(2, len(db.connection.queries)) | 55,209,758,312,207,290 | Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model or
by ID -- should hit the database only on the first lookup. | django/contrib/contenttypes/tests.py | test_lookup_cache | coderanger/django | python | def test_lookup_cache(self):
'\n Make sure that the content type cache (see ContentTypeManager)\n works correctly. Lookups for a particular content type -- by model or\n by ID -- should hit the database only on the first lookup.\n '
ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ct = ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.get_for_id(ct.id)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.clear_cache()
ContentType.objects.get_for_model(ContentType)
len(db.connection.queries)
self.assertEqual(2, len(db.connection.queries)) |
def test_shortcut_view(self):
'\n Check that the shortcut view (used for the admin "view on site"\n functionality) returns a complete URL regardless of whether the sites\n framework is installed\n '
request = HttpRequest()
request.META = {'SERVER_NAME': 'Example.com', 'SERVER_PORT': '80'}
from django.contrib.auth.models import User
user_ct = ContentType.objects.get_for_model(User)
obj = User.objects.create(username='john')
if Site._meta.installed:
current_site = Site.objects.get_current()
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual(('http://%s/users/john/' % current_site.domain), response._headers.get('location')[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual('http://Example.com/users/john/', response._headers.get('location')[1]) | 5,184,480,617,571,533,000 | Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed | django/contrib/contenttypes/tests.py | test_shortcut_view | coderanger/django | python | def test_shortcut_view(self):
'\n Check that the shortcut view (used for the admin "view on site"\n functionality) returns a complete URL regardless of whether the sites\n framework is installed\n '
request = HttpRequest()
request.META = {'SERVER_NAME': 'Example.com', 'SERVER_PORT': '80'}
from django.contrib.auth.models import User
user_ct = ContentType.objects.get_for_model(User)
obj = User.objects.create(username='john')
if Site._meta.installed:
current_site = Site.objects.get_current()
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual(('http://%s/users/john/' % current_site.domain), response._headers.get('location')[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual('http://Example.com/users/john/', response._headers.get('location')[1]) |
def _get_paths_from_images(path):
'get image path list from image folder'
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for (dirpath, _, fnames) in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images | 3,823,462,890,462,621,700 | get image path list from image folder | mmedit/models/inpaintors/vic/common.py | _get_paths_from_images | f74066357/Image_Inpainting | python | def _get_paths_from_images(path):
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for (dirpath, _, fnames) in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images |
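`is_image_file` is not defined in this excerpt; a common definition (an assumption, not necessarily this repo's exact code) checks the filename suffix against a whitelist:

```python
# Hypothetical extension whitelist; real projects often include more formats.
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff')

def is_image_file(filename):
    return filename.lower().endswith(IMG_EXTENSIONS)

print(is_image_file('photo.PNG'))  # True
print(is_image_file('notes.txt'))  # False
```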
def _get_paths_from_lmdb(dataroot):
'get image path list from lmdb'
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
keys = pickle.load(open(keys_cache_file, 'rb'))
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for (key, _) in txn.cursor()]
pickle.dump(keys, open(keys_cache_file, 'wb'))
paths = sorted([key for key in keys if (not key.endswith('.meta'))])
return (env, paths) | -5,246,789,503,854,577,000 | get image path list from lmdb | mmedit/models/inpaintors/vic/common.py | _get_paths_from_lmdb | f74066357/Image_Inpainting | python | def _get_paths_from_lmdb(dataroot):
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
keys = pickle.load(open(keys_cache_file, 'rb'))
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for (key, _) in txn.cursor()]
pickle.dump(keys, open(keys_cache_file, 'wb'))
paths = sorted([key for key in keys if (not key.endswith('.meta'))])
return (env, paths) |
def get_image_paths(data_type, dataroot):
'get image path list\n support lmdb or image files'
(env, paths) = (None, None)
if (dataroot is not None):
if (data_type == 'lmdb'):
(env, paths) = _get_paths_from_lmdb(dataroot)
elif (data_type == 'img'):
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return (env, paths) | -6,485,064,618,622,332,000 | get image path list
support lmdb or image files | mmedit/models/inpaintors/vic/common.py | get_image_paths | f74066357/Image_Inpainting | python | def get_image_paths(data_type, dataroot):
'get image path list\n support lmdb or image files'
(env, paths) = (None, None)
if (dataroot is not None):
if (data_type == 'lmdb'):
(env, paths) = _get_paths_from_lmdb(dataroot)
elif (data_type == 'img'):
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return (env, paths) |
def read_img(env, path, out_nc=3, fix_channels=True):
'\n Reads image using cv2 (rawpy if dng) or from lmdb by default\n (can also use PIL instead of cv2)\n Arguments:\n out_nc: Desired number of channels\n fix_channels: changes the images to the desired number of channels\n Output:\n Numpy uint8, HWC, BGR, [0,255] by default\n '
img = None
if (env is None):
if (path[(- 3):].lower() == 'dng'):
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
elif (path[(- 3):].lower() == 'npy'):
with open(path, 'rb') as f:
img = np.load(f)
else:
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
else:
img = _read_lmdb_img(env, path)
if fix_channels:
img = fix_img_channels(img, out_nc)
return img | -8,034,327,278,745,694,000 | Reads image using cv2 (rawpy if dng) or from lmdb by default
(can also use PIL instead of cv2)
Arguments:
out_nc: Desired number of channels
fix_channels: changes the images to the desired number of channels
Output:
Numpy uint8, HWC, BGR, [0,255] by default | mmedit/models/inpaintors/vic/common.py | read_img | f74066357/Image_Inpainting | python | def read_img(env, path, out_nc=3, fix_channels=True):
'\n Reads image using cv2 (rawpy if dng) or from lmdb by default\n (can also use PIL instead of cv2)\n Arguments:\n out_nc: Desired number of channels\n fix_channels: changes the images to the desired number of channels\n Output:\n Numpy uint8, HWC, BGR, [0,255] by default\n '
img = None
if (env is None):
if (path[(- 3):].lower() == 'dng'):
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
elif (path[(- 3):].lower() == 'npy'):
with open(path, 'rb') as f:
img = np.load(f)
else:
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
else:
img = _read_lmdb_img(env, path)
if fix_channels:
img = fix_img_channels(img, out_nc)
return img |
def fix_img_channels(img, out_nc):
'\n fix image channels to the expected number\n '
if (img.ndim == 2):
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
if ((out_nc == 3) and (img.shape[2] == 4)):
img = bgra2rgb(img)
elif (img.shape[2] > out_nc):
img = img[:, :, :out_nc]
elif ((img.shape[2] == 3) and (out_nc == 4)):
img = np.dstack((img, np.full(img.shape[:(- 1)], 255, dtype=np.uint8)))
return img | -2,006,961,338,334,872,300 | fix image channels to the expected number | mmedit/models/inpaintors/vic/common.py | fix_img_channels | f74066357/Image_Inpainting | python | def fix_img_channels(img, out_nc):
'\n \n '
if (img.ndim == 2):
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
if ((out_nc == 3) and (img.shape[2] == 4)):
img = bgra2rgb(img)
elif (img.shape[2] > out_nc):
img = img[:, :, :out_nc]
elif ((img.shape[2] == 3) and (out_nc == 4)):
img = np.dstack((img, np.full(img.shape[:(- 1)], 255, dtype=np.uint8)))
return img |
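Shape behaviour of `fix_img_channels` above on synthetic arrays, assuming the function is in scope:

```python
import numpy as np

gray = np.zeros((4, 4), dtype=np.uint8)
print(fix_img_channels(gray, 3).shape)  # (4, 4, 3): grayscale tiled to 3 channels

rgb = np.zeros((4, 4, 3), dtype=np.uint8)
print(fix_img_channels(rgb, 4).shape)   # (4, 4, 4): opaque alpha plane appended
```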
def bgra2rgb(img):
'\n cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue removing the alpha channel,\n this gets rid of wrong transparent colors that can harm training\n '
if (img.shape[2] == 4):
(b, g, r, a) = cv2.split(img.astype(np.uint8))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
return cv2.merge([b, g, r])
return img | 2,376,992,496,783,938,000 | cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue removing the alpha channel,
this gets rid of wrong transparent colors that can harm training | mmedit/models/inpaintors/vic/common.py | bgra2rgb | f74066357/Image_Inpainting | python | def bgra2rgb(img):
'\n cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue removing the alpha channel,\n this gets rid of wrong transparent colors that can harm training\n '
if (img.shape[2] == 4):
(b, g, r, a) = cv2.split(img.astype(np.uint8))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
return cv2.merge([b, g, r])
return img |
def rgb2ycbcr(img, only_y=True):
'same as matlab rgb2ycbcr\n only_y: only return Y channel\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '
in_img_type = img.dtype
img_ = img.astype(np.float32)
if (in_img_type != np.uint8):
img_ *= 255.0
if only_y:
rlt = ((np.dot(img_, [65.481, 128.553, 24.966]) / 255.0) + 16.0)
else:
rlt = ((np.matmul(img_, [[65.481, (- 37.797), 112.0], [128.553, (- 74.203), (- 93.786)], [24.966, 112.0, (- 18.214)]]) / 255.0) + [16, 128, 128])
if (in_img_type == np.uint8):
rlt = rlt.round()
else:
rlt /= 255.0
return rlt.astype(in_img_type) | -4,436,954,248,337,563,600 | same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1] | mmedit/models/inpaintors/vic/common.py | rgb2ycbcr | f74066357/Image_Inpainting | python | def rgb2ycbcr(img, only_y=True):
'same as matlab rgb2ycbcr\n only_y: only return Y channel\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '
in_img_type = img.dtype
img_ = img.astype(np.float32)
if (in_img_type != np.uint8):
img_ *= 255.0
if only_y:
rlt = ((np.dot(img_, [65.481, 128.553, 24.966]) / 255.0) + 16.0)
else:
rlt = ((np.matmul(img_, [[65.481, (- 37.797), 112.0], [128.553, (- 74.203), (- 93.786)], [24.966, 112.0, (- 18.214)]]) / 255.0) + [16, 128, 128])
if (in_img_type == np.uint8):
rlt = rlt.round()
else:
rlt /= 255.0
return rlt.astype(in_img_type) |
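A sanity check of the luma row above: for uint8 input there is no rescaling, so white maps to Y = 16 + (65.481 + 128.553 + 24.966) = 235 and black to 16, the studio-swing limits. Assuming `rgb2ycbcr` from above is in scope:

```python
import numpy as np

white = np.full((1, 1, 3), 255, dtype=np.uint8)
black = np.zeros((1, 1, 3), dtype=np.uint8)
print(rgb2ycbcr(white, only_y=True))  # [[235]]
print(rgb2ycbcr(black, only_y=True))  # [[16]]
```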
def bgr2ycbcr(img, only_y=True, separate=False):
'bgr version of matlab rgb2ycbcr\n Python opencv library (cv2) cv2.COLOR_BGR2YCrCb has\n different parameters from MATLAB color conversion.\n only_y: only return Y channel\n separate: if true, will return the channels as\n separate images\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '
in_img_type = img.dtype
img_ = img.astype(np.float32)
if (in_img_type != np.uint8):
img_ *= 255.0
if only_y:
rlt = ((np.dot(img_, [24.966, 128.553, 65.481]) / 255.0) + 16.0)
else:
rlt = ((np.matmul(img_, [[24.966, 112.0, (- 18.214)], [128.553, (- 74.203), (- 93.786)], [65.481, (- 37.797), 112.0]]) / 255.0) + [16, 128, 128])
if (in_img_type == np.uint8):
rlt = rlt.round()
else:
rlt /= 255.0
if separate:
rlt = rlt.astype(in_img_type)
return (rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2])
else:
return rlt.astype(in_img_type) | -7,023,631,035,229,426,000 | bgr version of matlab rgb2ycbcr
Python opencv library (cv2) cv2.COLOR_BGR2YCrCb has
different parameters from MATLAB color conversion.
only_y: only return Y channel
separate: if true, will return the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1] | mmedit/models/inpaintors/vic/common.py | bgr2ycbcr | f74066357/Image_Inpainting | python | def bgr2ycbcr(img, only_y=True, separate=False):
'bgr version of matlab rgb2ycbcr\n Python opencv library (cv2) cv2.COLOR_BGR2YCrCb has\n different parameters from MATLAB color conversion.\n only_y: only return Y channel\n separate: if true, will return the channels as\n separate images\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '
in_img_type = img.dtype
img_ = img.astype(np.float32)
if (in_img_type != np.uint8):
img_ *= 255.0
if only_y:
rlt = ((np.dot(img_, [24.966, 128.553, 65.481]) / 255.0) + 16.0)
else:
rlt = ((np.matmul(img_, [[24.966, 112.0, (- 18.214)], [128.553, (- 74.203), (- 93.786)], [65.481, (- 37.797), 112.0]]) / 255.0) + [16, 128, 128])
if (in_img_type == np.uint8):
rlt = rlt.round()
else:
rlt /= 255.0
if separate:
rlt = rlt.astype(in_img_type)
return (rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2])
else:
return rlt.astype(in_img_type) |
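The BGR variant works the same way on cv2-style images; separate=True unpacks the planes. A short sketch with a random float image:

import numpy as np
from mmedit.models.inpaintors.vic.common import bgr2ycbcr

bgr = np.random.rand(64, 64, 3).astype(np.float32)        # float input in [0, 1]
y = bgr2ycbcr(bgr, only_y=True)                           # luma plane in [0, 1]
y2, cb, cr = bgr2ycbcr(bgr, only_y=False, separate=True)  # three (64, 64) planes
assert y.shape == (64, 64) and cb.shape == (64, 64)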
def ycbcr2rgb(img, only_y=True):
'\n bgr version of matlab ycbcr2rgb\n Python opencv library (cv2) cv2.COLOR_YCrCb2BGR has\n different parameters from MATLAB color conversion.\n\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '
in_img_type = img.dtype
img_ = img.astype(np.float32)
if (in_img_type != np.uint8):
img_ *= 255.0
mat = np.array([[24.966, 128.553, 65.481], [112, (- 74.203), (- 37.797)], [(- 18.214), (- 93.786), 112.0]])
mat = (np.linalg.inv(mat.T) * 255)
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
if (in_img_type == np.uint8):
rlt = rlt.round()
else:
rlt /= 255.0
return rlt.astype(in_img_type) | -428,444,737,155,177,860 | bgr version of matlab ycbcr2rgb
Python opencv library (cv2) cv2.COLOR_YCrCb2BGR has
different parameters from MATLAB color conversion.
Input:
uint8, [0, 255]
float, [0, 1] | mmedit/models/inpaintors/vic/common.py | ycbcr2rgb | f74066357/Image_Inpainting | python | def ycbcr2rgb(img, only_y=True):
'\n bgr version of matlab ycbcr2rgb\n Python opencv library (cv2) cv2.COLOR_YCrCb2BGR has\n different parameters from MATLAB color conversion.\n\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '
in_img_type = img.dtype
img_ = img.astype(np.float32)
if (in_img_type != np.uint8):
img_ *= 255.0
mat = np.array([[24.966, 128.553, 65.481], [112, (- 74.203), (- 37.797)], [(- 18.214), (- 93.786), 112.0]])
mat = (np.linalg.inv(mat.T) * 255)
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
if (in_img_type == np.uint8):
rlt = rlt.round()
else:
rlt /= 255.0
return rlt.astype(in_img_type) |
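A round-trip sketch; note that despite its name, ycbcr2rgb inverts the BGR mapping above, so it recovers a BGR image up to rounding:

import numpy as np
from mmedit.models.inpaintors.vic.common import bgr2ycbcr, ycbcr2rgb

bgr = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
restored = ycbcr2rgb(bgr2ycbcr(bgr, only_y=False))
assert np.abs(restored.astype(int) - bgr.astype(int)).max() <= 2  # rounding only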
def denorm(x, min_max=((- 1.0), 1.0)):
'\n Denormalize from [-1,1] range to [0,1]\n formula: xi\' = (xi - mu)/sigma\n Example: "out = (x + 1.0) / 2.0" for denorm\n range (-1,1) to (0,1)\n for use with a proper activation in Generator output (i.e. tanh)\n '
out = ((x - min_max[0]) / (min_max[1] - min_max[0]))
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError('Got unexpected object type, expected torch.Tensor or np.ndarray') | 5,438,653,087,262,055,000 | Denormalize from [-1,1] range to [0,1]
formula: xi' = (xi - mu)/sigma
Example: "out = (x + 1.0) / 2.0" for denorm
range (-1,1) to (0,1)
for use with a proper activation in Generator output (i.e. tanh)
'\n Denormalize from [-1,1] range to [0,1]\n formula: xi\' = (xi - mu)/sigma\n Example: "out = (x + 1.0) / 2.0" for denorm\n range (-1,1) to (0,1)\n for use with a proper activation in Generator output (i.e. tanh)\n '
out = ((x - min_max[0]) / (min_max[1] - min_max[0]))
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError('Got unexpected object type, expected torch.Tensor or np.ndarray') |
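A quick usage sketch for denorm with a tanh-activated generator output:

import numpy as np
import torch

fake = torch.tanh(torch.randn(1, 3, 8, 8))      # generator output in [-1, 1]
img01 = denorm(fake)                            # rescaled and clamped to [0, 1]
assert float(img01.min()) >= 0.0 and float(img01.max()) <= 1.0
print(denorm(np.array([-1.0, 0.0, 1.0])))       # -> [0.  0.5 1. ]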
def np2tensor(img, bgr2rgb=True, data_range=1.0, normalize=False, change_range=True, add_batch=True):
'\n Converts a numpy image array into a Tensor array.\n Parameters:\n img (numpy array): the input image numpy array\n add_batch (bool): choose if new tensor needs batch dimension added\n '
if (not isinstance(img, np.ndarray)):
raise TypeError('Got unexpected object type, expected np.ndarray')
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = ((img * data_range) / info(img.dtype).max)
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float()
if bgr2rgb:
if (img.shape[0] == 3):
img = bgr_to_rgb(img)
elif (img.shape[0] == 4):
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0)
if normalize:
img = norm(img)
return img | -1,375,393,900,745,936,400 | Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added | mmedit/models/inpaintors/vic/common.py | np2tensor | f74066357/Image_Inpainting | python | def np2tensor(img, bgr2rgb=True, data_range=1.0, normalize=False, change_range=True, add_batch=True):
'\n Converts a numpy image array into a Tensor array.\n Parameters:\n img (numpy array): the input image numpy array\n add_batch (bool): choose if new tensor needs batch dimension added\n '
if (not isinstance(img, np.ndarray)):
raise TypeError('Got unexpected object type, expected np.ndarray')
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = ((img * data_range) / info(img.dtype).max)
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float()
if bgr2rgb:
if (img.shape[0] == 3):
img = bgr_to_rgb(img)
elif (img.shape[0] == 4):
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0)
if normalize:
img = norm(img)
return img |
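A usage sketch for np2tensor on a cv2-style image; the import path follows this record's path column.

import numpy as np
from mmedit.models.inpaintors.vic.common import np2tensor

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # HWC, BGR, uint8
t = np2tensor(img)  # float tensor, shape (1, 3, 64, 64), RGB order, values in [0, 1]
assert tuple(t.shape) == (1, 3, 64, 64)
assert 0.0 <= float(t.min()) and float(t.max()) <= 1.0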
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255, denormalize=False, change_range=True, imtype=np.uint8):
'\n Converts a Tensor array into a numpy image array.\n Parameters:\n img (tensor): the input image tensor array\n 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\n remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed\n denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]\n imtype (type): the desired type of the converted numpy array (np.uint8\n default)\n Output:\n img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\n '
if (not isinstance(img, torch.Tensor)):
raise TypeError('Got unexpected object type, expected torch.Tensor')
n_dim = img.dim()
img = img.float().cpu()
if ((n_dim == 4) or (n_dim == 3)):
if ((n_dim == 4) and remove_batch):
if (img.shape[0] > 1):
img = img[(0, ...)]
else:
img = img.squeeze()
if (len(img.shape) < 3):
img = img.unsqueeze(dim=0)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if ((img.shape[0] == 3) and rgb2bgr):
img_np = rgb_to_bgr(img).numpy()
elif ((img.shape[0] == 4) and rgb2bgr):
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0))
elif (n_dim == 2):
img_np = img.numpy()
else:
raise TypeError('Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
if denormalize:
img_np = denorm(img_np)
if change_range:
img_np = np.clip((data_range * img_np), 0, data_range).round()
return img_np.astype(imtype) | -3,020,553,353,486,757,000 | Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) | mmedit/models/inpaintors/vic/common.py | tensor2np | f74066357/Image_Inpainting | python | def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255, denormalize=False, change_range=True, imtype=np.uint8):
'\n Converts a Tensor array into a numpy image array.\n Parameters:\n img (tensor): the input image tensor array\n 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\n remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed\n denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]\n imtype (type): the desired type of the converted numpy array (np.uint8\n default)\n Output:\n img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\n '
if (not isinstance(img, torch.Tensor)):
raise TypeError('Got unexpected object type, expected torch.Tensor')
n_dim = img.dim()
img = img.float().cpu()
if ((n_dim == 4) or (n_dim == 3)):
if ((n_dim == 4) and remove_batch):
if (img.shape[0] > 1):
img = img[(0, ...)]
else:
img = img.squeeze()
if (len(img.shape) < 3):
img = img.unsqueeze(dim=0)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if ((img.shape[0] == 3) and rgb2bgr):
img_np = rgb_to_bgr(img).numpy()
elif ((img.shape[0] == 4) and rgb2bgr):
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0))
elif (n_dim == 2):
img_np = img.numpy()
else:
raise TypeError('Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
if denormalize:
img_np = denorm(img_np)
if change_range:
img_np = np.clip((data_range * img_np), 0, data_range).round()
return img_np.astype(imtype) |
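And the round trip back through tensor2np, which undoes the channel swap and rescaling exactly for uint8 input:

import numpy as np
from mmedit.models.inpaintors.vic.common import np2tensor, tensor2np

img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)   # HWC, BGR, uint8
back = tensor2np(np2tensor(img))                               # HWC, BGR, uint8
assert back.dtype == np.uint8 and (back == img).all()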
def hilo(high, low, close, high_length=None, low_length=None, mamode=None, offset=None, **kwargs):
'Indicator: Gann HiLo (HiLo)'
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
high_length = (int(high_length) if (high_length and (high_length > 0)) else 13)
low_length = (int(low_length) if (low_length and (low_length > 0)) else 21)
mamode = (mamode.lower() if isinstance(mamode, str) else 'sma')
offset = get_offset(offset)
m = close.size
hilo = Series(npNaN, index=close.index)
long = Series(npNaN, index=close.index)
short = Series(npNaN, index=close.index)
high_ma = ma(mamode, high, length=high_length)
low_ma = ma(mamode, low, length=low_length)
for i in range(1, m):
if (close.iloc[i] > high_ma.iloc[(i - 1)]):
hilo.iloc[i] = long.iloc[i] = low_ma.iloc[i]
elif (close.iloc[i] < low_ma.iloc[(i - 1)]):
hilo.iloc[i] = short.iloc[i] = high_ma.iloc[i]
else:
hilo.iloc[i] = hilo.iloc[(i - 1)]
long.iloc[i] = short.iloc[i] = hilo.iloc[(i - 1)]
if (offset != 0):
hilo = hilo.shift(offset)
long = long.shift(offset)
short = short.shift(offset)
if ('fillna' in kwargs):
hilo.fillna(kwargs['fillna'], inplace=True)
long.fillna(kwargs['fillna'], inplace=True)
short.fillna(kwargs['fillna'], inplace=True)
if ('fill_method' in kwargs):
hilo.fillna(method=kwargs['fill_method'], inplace=True)
long.fillna(method=kwargs['fill_method'], inplace=True)
short.fillna(method=kwargs['fill_method'], inplace=True)
_props = f'_{high_length}_{low_length}'
data = {f'HILO{_props}': hilo, f'HILOl{_props}': long, f'HILOs{_props}': short}
df = DataFrame(data, index=close.index)
df.name = f'HILO{_props}'
df.category = 'overlap'
return df | -493,136,803,108,728,260 | Indicator: Gann HiLo (HiLo) | pandas_ta/overlap/hilo.py | hilo | MyBourse/pandas-ta | python | def hilo(high, low, close, high_length=None, low_length=None, mamode=None, offset=None, **kwargs):
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
high_length = (int(high_length) if (high_length and (high_length > 0)) else 13)
low_length = (int(low_length) if (low_length and (low_length > 0)) else 21)
mamode = (mamode.lower() if isinstance(mamode, str) else 'sma')
offset = get_offset(offset)
m = close.size
hilo = Series(npNaN, index=close.index)
long = Series(npNaN, index=close.index)
short = Series(npNaN, index=close.index)
high_ma = ma(mamode, high, length=high_length)
low_ma = ma(mamode, low, length=low_length)
for i in range(1, m):
if (close.iloc[i] > high_ma.iloc[(i - 1)]):
hilo.iloc[i] = long.iloc[i] = low_ma.iloc[i]
elif (close.iloc[i] < low_ma.iloc[(i - 1)]):
hilo.iloc[i] = short.iloc[i] = high_ma.iloc[i]
else:
hilo.iloc[i] = hilo.iloc[(i - 1)]
long.iloc[i] = short.iloc[i] = hilo.iloc[(i - 1)]
if (offset != 0):
hilo = hilo.shift(offset)
long = long.shift(offset)
short = short.shift(offset)
if ('fillna' in kwargs):
hilo.fillna(kwargs['fillna'], inplace=True)
long.fillna(kwargs['fillna'], inplace=True)
short.fillna(kwargs['fillna'], inplace=True)
if ('fill_method' in kwargs):
hilo.fillna(method=kwargs['fill_method'], inplace=True)
long.fillna(method=kwargs['fill_method'], inplace=True)
short.fillna(method=kwargs['fill_method'], inplace=True)
_props = f'_{high_length}_{low_length}'
data = {f'HILO{_props}': hilo, f'HILOl{_props}': long, f'HILOs{_props}': short}
df = DataFrame(data, index=close.index)
df.name = f'HILO{_props}'
df.category = 'overlap'
return df |
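A usage sketch for hilo on synthetic OHLC data; the import path follows this record's path column and the random walk below is illustrative only.

import numpy as np
import pandas as pd
from pandas_ta.overlap.hilo import hilo

idx = pd.date_range('2020-01-01', periods=120, freq='D')
close = pd.Series(100 + np.cumsum(np.random.randn(120)), index=idx)
df = hilo(close + 1.0, close - 1.0, close, high_length=13, low_length=21, mamode='sma')
# HILO_13_21 is the active line, HILOl_13_21 / HILOs_13_21 the long/short legs.
print(df.columns.tolist(), df.name)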
def test_output_data(mp_tmpdir):
'Check GeoTIFF as output data.'
output_params = dict(grid='geodetic', format='GeoTIFF', path=mp_tmpdir, pixelbuffer=0, metatiling=1, bands=1, dtype='int16', delimiters=dict(bounds=Bounds((- 180.0), (- 90.0), 180.0, 90.0), effective_bounds=Bounds((- 180.439453125), (- 90.0), 180.439453125, 90.0), zoom=[5], process_bounds=Bounds((- 180.0), (- 90.0), 180.0, 90.0)))
output = gtiff.OutputDataWriter(output_params)
assert (output.path == mp_tmpdir)
assert (output.file_extension == '.tif')
tp = BufferedTilePyramid('geodetic')
tile = tp.tile(5, 5, 5)
assert (output.get_path(tile) == os.path.join(*[mp_tmpdir, '5', '5', ('5' + '.tif')]))
try:
temp_dir = os.path.join(*[mp_tmpdir, '5', '5'])
output.prepare_path(tile)
assert os.path.isdir(temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
assert isinstance(output.profile(tile), dict)
try:
data = (np.ones(((1,) + tile.shape)) * 128)
output.write(tile, data)
assert output.tiles_exist(tile)
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert (not data[0].mask.any())
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert data[0].mask.all()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
empty = output.empty(tile)
assert isinstance(empty, ma.MaskedArray)
assert (not empty.any())
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
output_params.update(compress='deflate', predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert (output.profile(tile)['compress'] == 'deflate')
assert (output.profile(tile)['predictor'] == 2)
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
with pytest.deprecated_call():
output_params.update(compression='deflate', predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert (output.profile(tile)['compress'] == 'deflate')
assert (output.profile(tile)['predictor'] == 2)
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True) | -6,277,988,863,151,543,000 | Check GeoTIFF as output data. | test/test_formats_geotiff.py | test_output_data | Scartography/mapchete | python | def test_output_data(mp_tmpdir):
output_params = dict(grid='geodetic', format='GeoTIFF', path=mp_tmpdir, pixelbuffer=0, metatiling=1, bands=1, dtype='int16', delimiters=dict(bounds=Bounds((- 180.0), (- 90.0), 180.0, 90.0), effective_bounds=Bounds((- 180.439453125), (- 90.0), 180.439453125, 90.0), zoom=[5], process_bounds=Bounds((- 180.0), (- 90.0), 180.0, 90.0)))
output = gtiff.OutputDataWriter(output_params)
assert (output.path == mp_tmpdir)
assert (output.file_extension == '.tif')
tp = BufferedTilePyramid('geodetic')
tile = tp.tile(5, 5, 5)
assert (output.get_path(tile) == os.path.join(*[mp_tmpdir, '5', '5', ('5' + '.tif')]))
try:
temp_dir = os.path.join(*[mp_tmpdir, '5', '5'])
output.prepare_path(tile)
assert os.path.isdir(temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
assert isinstance(output.profile(tile), dict)
try:
data = (np.ones(((1,) + tile.shape)) * 128)
output.write(tile, data)
assert output.tiles_exist(tile)
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert (not data[0].mask.any())
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert data[0].mask.all()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
empty = output.empty(tile)
assert isinstance(empty, ma.MaskedArray)
assert (not empty.any())
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
output_params.update(compress='deflate', predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert (output.profile(tile)['compress'] == 'deflate')
assert (output.profile(tile)['predictor'] == 2)
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
with pytest.deprecated_call():
output_params.update(compression='deflate', predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert (output.profile(tile)['compress'] == 'deflate')
assert (output.profile(tile)['predictor'] == 2)
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True) |
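Condensed from the test above into a minimal write/read cycle; plain tuples stand in for the Bounds type the test uses, and the import paths are assumed from mapchete's layout.

import numpy as np
from mapchete.formats.default import gtiff          # assumed import path
from mapchete.tile import BufferedTilePyramid       # assumed import path

output_params = dict(
    grid='geodetic', format='GeoTIFF', path='/tmp/gtiff_demo',
    pixelbuffer=0, metatiling=1, bands=1, dtype='int16',
    delimiters=dict(
        bounds=(-180.0, -90.0, 180.0, 90.0),
        effective_bounds=(-180.439453125, -90.0, 180.439453125, 90.0),
        zoom=[5], process_bounds=(-180.0, -90.0, 180.0, 90.0)))
output = gtiff.OutputDataWriter(output_params)
tile = BufferedTilePyramid('geodetic').tile(5, 5, 5)
output.prepare_path(tile)
output.write(tile, np.ones((1,) + tile.shape, dtype='int16') * 128)
assert output.tiles_exist(tile)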
def test_for_web(client, mp_tmpdir):
'Send GTiff via flask.'
tile_base_url = '/wmts_simple/1.0.0/cleantopo_br/default/WGS84/'
for url in ['/']:
response = client.get(url)
assert (response.status_code == 200)
for url in [(tile_base_url + '5/30/62.tif'), (tile_base_url + '5/30/63.tif'), (tile_base_url + '5/31/62.tif'), (tile_base_url + '5/31/63.tif')]:
response = client.get(url)
assert (response.status_code == 200)
img = response.data
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with MemoryFile(img) as memfile:
with memfile.open() as dataset:
assert dataset.read().any() | -5,188,252,424,518,334,000 | Send GTiff via flask. | test/test_formats_geotiff.py | test_for_web | Scartography/mapchete | python | def test_for_web(client, mp_tmpdir):
tile_base_url = '/wmts_simple/1.0.0/cleantopo_br/default/WGS84/'
for url in ['/']:
response = client.get(url)
assert (response.status_code == 200)
for url in [(tile_base_url + '5/30/62.tif'), (tile_base_url + '5/30/63.tif'), (tile_base_url + '5/31/62.tif'), (tile_base_url + '5/31/63.tif')]:
response = client.get(url)
assert (response.status_code == 200)
img = response.data
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with MemoryFile(img) as memfile:
with memfile.open() as dataset:
assert dataset.read().any() |
def test_input_data(mp_tmpdir, cleantopo_br):
'Check GeoTIFF process output as input data.'
with mapchete.open(cleantopo_br.path) as mp:
tp = BufferedTilePyramid('geodetic')
tile = tp.tile(5, 5, 5)
output_params = dict(grid='geodetic', format='GeoTIFF', path=mp_tmpdir, pixelbuffer=0, metatiling=1, bands=2, dtype='int16', delimiters=dict(bounds=Bounds((- 180.0), (- 90.0), 180.0, 90.0), effective_bounds=Bounds((- 180.439453125), (- 90.0), 180.439453125, 90.0), zoom=[5], process_bounds=Bounds((- 180.0), (- 90.0), 180.0, 90.0)))
output = gtiff.OutputDataWriter(output_params)
with output.open(tile, mp) as input_tile:
for data in [input_tile.read(), input_tile.read(1), input_tile.read([1])]:
assert isinstance(data, ma.masked_array)
assert input_tile.is_empty()
with output.open(tile, mp) as input_tile:
pass | -3,211,887,975,729,637,000 | Check GeoTIFF process output as input data. | test/test_formats_geotiff.py | test_input_data | Scartography/mapchete | python | def test_input_data(mp_tmpdir, cleantopo_br):
with mapchete.open(cleantopo_br.path) as mp:
tp = BufferedTilePyramid('geodetic')
tile = tp.tile(5, 5, 5)
output_params = dict(grid='geodetic', format='GeoTIFF', path=mp_tmpdir, pixelbuffer=0, metatiling=1, bands=2, dtype='int16', delimiters=dict(bounds=Bounds((- 180.0), (- 90.0), 180.0, 90.0), effective_bounds=Bounds((- 180.439453125), (- 90.0), 180.439453125, 90.0), zoom=[5], process_bounds=Bounds((- 180.0), (- 90.0), 180.0, 90.0)))
output = gtiff.OutputDataWriter(output_params)
with output.open(tile, mp) as input_tile:
for data in [input_tile.read(), input_tile.read(1), input_tile.read([1])]:
assert isinstance(data, ma.masked_array)
assert input_tile.is_empty()
with output.open(tile, mp) as input_tile:
pass |
def test_write_geotiff_tags(mp_tmpdir, cleantopo_br, write_rasterfile_tags_py):
'Pass on metadata tags from user process to rasterio.'
conf = dict(**cleantopo_br.dict)
conf.update(process=write_rasterfile_tags_py)
with mapchete.open(conf) as mp:
for tile in mp.get_process_tiles():
(data, tags) = mp.execute(tile)
assert data.any()
assert isinstance(tags, dict)
mp.write(process_tile=tile, data=(data, tags))
out_path = mp.config.output.get_path(tile)
with rasterio.open(out_path) as src:
assert ('filewide_tag' in src.tags())
assert (src.tags()['filewide_tag'] == 'value')
assert ('band_tag' in src.tags(1))
assert (src.tags(1)['band_tag'] == 'True') | 5,183,305,310,551,301,000 | Pass on metadata tags from user process to rasterio. | test/test_formats_geotiff.py | test_write_geotiff_tags | Scartography/mapchete | python | def test_write_geotiff_tags(mp_tmpdir, cleantopo_br, write_rasterfile_tags_py):
conf = dict(**cleantopo_br.dict)
conf.update(process=write_rasterfile_tags_py)
with mapchete.open(conf) as mp:
for tile in mp.get_process_tiles():
(data, tags) = mp.execute(tile)
assert data.any()
assert isinstance(tags, dict)
mp.write(process_tile=tile, data=(data, tags))
out_path = mp.config.output.get_path(tile)
with rasterio.open(out_path) as src:
assert ('filewide_tag' in src.tags())
assert (src.tags()['filewide_tag'] == 'value')
assert ('band_tag' in src.tags(1))
assert (src.tags(1)['band_tag'] == 'True') |
@pytest.mark.remote
def test_s3_write_output_data(gtiff_s3, s3_example_tile, mp_s3_tmpdir):
'Write and read output.'
with mapchete.open(gtiff_s3.dict) as mp:
process_tile = mp.config.process_pyramid.tile(*s3_example_tile)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert (not mp.config.output.tiles_exist(process_tile))
mp.batch_process(tile=process_tile.id)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert (not data[0].mask.all()) | 3,358,115,100,684,328,000 | Write and read output. | test/test_formats_geotiff.py | test_s3_write_output_data | Scartography/mapchete | python | @pytest.mark.remote
def test_s3_write_output_data(gtiff_s3, s3_example_tile, mp_s3_tmpdir):
with mapchete.open(gtiff_s3.dict) as mp:
process_tile = mp.config.process_pyramid.tile(*s3_example_tile)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert (not mp.config.output.tiles_exist(process_tile))
mp.batch_process(tile=process_tile.id)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert (not data[0].mask.all()) |
def no_translate_debug_logs(logical_line, filename):
"Check for 'LOG.debug(_('\n\n As per our translation policy,\n https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation\n we shouldn't translate debug level logs.\n\n * This check assumes that 'LOG' is a logger.\n * Use filename so we can start enforcing this in specific folders instead\n of needing to do so all at once.\n\n M319\n "
if logical_line.startswith('LOG.debug(_('):
(yield (0, "M319 Don't translate debug level logs")) | 3,433,835,137,199,243,000 | Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
M319 | manila/hacking/checks.py | no_translate_debug_logs | scality/manila | python | def no_translate_debug_logs(logical_line, filename):
"Check for 'LOG.debug(_('\n\n As per our translation policy,\n https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation\n we shouldn't translate debug level logs.\n\n * This check assumes that 'LOG' is a logger.\n * Use filename so we can start enforcing this in specific folders instead\n of needing to do so all at once.\n\n M319\n "
if logical_line.startswith('LOG.debug(_('):
(yield (0, "M319 Don't translate debug level logs")) |
def check_explicit_underscore_import(logical_line, filename):
"Check for explicit import of the _ function\n\n We need to ensure that any files that are using the _() function\n to translate logs are explicitly importing the _ function. We\n can't trust unit test to catch whether the import has been\n added so we need to check for it here.\n "
if (filename in UNDERSCORE_IMPORT_FILES):
pass
elif (underscore_import_check.match(logical_line) or custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or string_translation.match(logical_line)):
(yield (0, 'M323: Found use of _() without explicit import of _ !')) | 7,424,160,099,128,621,000 | Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit tests to catch whether the import has been
added so we need to check for it here. | manila/hacking/checks.py | check_explicit_underscore_import | scality/manila | python | def check_explicit_underscore_import(logical_line, filename):
"Check for explicit import of the _ function\n\n We need to ensure that any files that are using the _() function\n to translate logs are explicitly importing the _ function. We\n can't trust unit test to catch whether the import has been\n added so we need to check for it here.\n "
if (filename in UNDERSCORE_IMPORT_FILES):
pass
elif (underscore_import_check.match(logical_line) or custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or string_translation.match(logical_line)):
(yield (0, 'M323: Found use of _() without explicit import of _ !')) |
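A similar demonstration for the underscore-import check. The module-level regexes it consults (underscore_import_check, string_translation, and so on) are not shown in this record, so the exact trigger strings below are assumptions about their patterns; note the check is stateful, since files land in UNDERSCORE_IMPORT_FILES once an import is seen.

hits = list(check_explicit_underscore_import("msg = _('needs translation')", 'manila/share/foo.py'))
assert hits and hits[0][1].startswith('M323')
# Register an explicit import for the file, then the same usage is accepted.
list(check_explicit_underscore_import('from manila.i18n import _', 'manila/share/foo.py'))
assert list(check_explicit_underscore_import("msg = _('ok now')", 'manila/share/foo.py')) == []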
def __init__(self, tree, filename):
'This object is created automatically by pep8.\n\n :param tree: an AST tree\n :param filename: name of the file being analyzed\n (ignored by our checks)\n '
self._tree = tree
self._errors = [] | -3,440,636,442,667,781,600 | This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks) | manila/hacking/checks.py | __init__ | scality/manila | python | def __init__(self, tree, filename):
'This object is created automatically by pep8.\n\n :param tree: an AST tree\n :param filename: name of the file being analyzed\n (ignored by our checks)\n '
self._tree = tree
self._errors = [] |
def run(self):
'Called automatically by pep8.'
self.visit(self._tree)
return self._errors | -4,209,563,323,373,041,000 | Called automatically by pep8. | manila/hacking/checks.py | run | scality/manila | python | def run(self):
self.visit(self._tree)
return self._errors |
def add_error(self, node, message=None):
'Add an error caused by a node to the list of errors for pep8.'
message = (message or self.CHECK_DESC)
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error) | -9,018,306,392,635,961,000 | Add an error caused by a node to the list of errors for pep8. | manila/hacking/checks.py | add_error | scality/manila | python | def add_error(self, node, message=None):
message = (message or self.CHECK_DESC)
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error) |
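A hedged sketch of a concrete check built on the three methods above, assuming the enclosing class is the conventional BaseASTChecker(ast.NodeVisitor) used by OpenStack hacking modules (the class header itself is not part of this record):

import ast

class NoPrintChecker(BaseASTChecker):  # BaseASTChecker assumed from this module
    CHECK_DESC = 'M999 do not use print()'

    def visit_Call(self, node):
        # Flag every bare print(...) call and keep walking the tree.
        if isinstance(node.func, ast.Name) and node.func.id == 'print':
            self.add_error(node)
        self.generic_visit(node)

checker = NoPrintChecker(ast.parse("print('hi')\n"), 'example.py')
errors = checker.run()
assert errors[0][2] == 'M999 do not use print()'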
def choisir_matelots(self, exception=None):
'Return the sailor most apt to carry out the will.'
proches = []
matelots = self.navire.equipage.get_matelots_libres(exception)
graph = self.navire.graph
gouvernail = self.navire.gouvernail
if ((gouvernail is None) or (gouvernail.tenu is not None)):
return None
for matelot in matelots:
origine = matelot.salle.mnemonic
destination = gouvernail.parent.mnemonic
if (origine == destination):
proches.append((matelot, [], gouvernail))
else:
chemin = graph.get((origine, destination))
if chemin:
proches.append((matelot, chemin, gouvernail))
proches = sorted([couple for couple in proches], key=(lambda couple: len(couple[1])))
if proches:
return proches[0]
return None | -7,845,727,070,930,788,000 | Return the sailor most apt to carry out the will. | src/secondaires/navigation/equipage/volontes/tenir_gouvernail.py | choisir_matelots | stormi/tsunami | python | def choisir_matelots(self, exception=None):
proches = []
matelots = self.navire.equipage.get_matelots_libres(exception)
graph = self.navire.graph
gouvernail = self.navire.gouvernail
if ((gouvernail is None) or (gouvernail.tenu is not None)):
return None
for matelot in matelots:
origine = matelot.salle.mnemonic
destination = gouvernail.parent.mnemonic
if (origine == destination):
proches.append((matelot, [], gouvernail))
else:
chemin = graph.get((origine, destination))
if chemin:
proches.append((matelot, chemin, gouvernail))
proches = sorted([couple for couple in proches], key=(lambda couple: len(couple[1])))
if proches:
return proches[0]
return None |
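The selection above reduces to: among free sailors, prefer the one with the shortest precomputed path to the helm. A plain-data sketch of that core idea (the navire/equipage objects are game classes not shown here, and the rooms below are hypothetical):

# Hypothetical precomputed paths: (origin, destination) -> list of exits to walk.
graph = {('cabin', 'deck'): ['door', 'stairs'], ('hold', 'deck'): ['ladder', 'hatch', 'stairs']}
sailors = [('A', 'cabin'), ('B', 'hold'), ('C', 'deck')]
helm_room = 'deck'
candidates = []
for name, room in sailors:
    path = [] if room == helm_room else graph.get((room, helm_room))
    if path is not None:
        candidates.append((name, path))
best = min(candidates, key=lambda c: len(c[1]))
assert best[0] == 'C'  # already standing in the helm room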
def executer(self, sequence):
'Execute the will.'
if (sequence is None):
self.terminer()
return
(matelot, sorties, gouvernail) = sequence
navire = self.navire
ordres = []
if sorties:
aller = LongDeplacer(matelot, navire, *sorties)
ordres.append(aller)
tenir = OrdreTenirGouvernail(matelot, navire)
ordres.append(tenir)
self.ajouter_ordres(matelot, ordres) | -8,178,484,088,887,182,000 | Execute the will. | src/secondaires/navigation/equipage/volontes/tenir_gouvernail.py | executer | stormi/tsunami | python | def executer(self, sequence):
if (sequence is None):
self.terminer()
return
(matelot, sorties, gouvernail) = sequence
navire = self.navire
ordres = []
if sorties:
aller = LongDeplacer(matelot, navire, *sorties)
ordres.append(aller)
tenir = OrdreTenirGouvernail(matelot, navire)
ordres.append(tenir)
self.ajouter_ordres(matelot, ordres) |
def crier_ordres(self, personnage):
"On fait crier l'ordre au personnage."
msg = "{} s'écrie : un homme à la barre !".format(personnage.distinction_audible)
self.navire.envoyer(msg) | 8,899,339,267,492,573,000 | Have the character shout the order. | src/secondaires/navigation/equipage/volontes/tenir_gouvernail.py | crier_ordres | stormi/tsunami | python | def crier_ordres(self, personnage):
msg = "{} s'écrie : un homme à la barre !".format(personnage.distinction_audible)
self.navire.envoyer(msg) |
@classmethod
def extraire_arguments(cls, navire):
'Extract the arguments of the will.'
return () | -4,547,812,160,985,041,000 | Extract the arguments of the will. | src/secondaires/navigation/equipage/volontes/tenir_gouvernail.py | extraire_arguments | stormi/tsunami | python | @classmethod
def extraire_arguments(cls, navire):
return () |
def get_usgs_data(station_id, start_date, end_date, parameter='00060', cache_dir=None):
"Get river discharge data from the USGS REST web service.\n\n See `U.S. Geological Survey Water Services\n <https://waterservices.usgs.gov/>`_ (USGS)\n\n Parameters\n ----------\n station_id : str\n The station id to get\n start_date : str\n String for start date in the format: 'YYYY-MM-dd', e.g. '1980-01-01'\n end_date : str\n String for start date in the format: 'YYYY-MM-dd', e.g. '2018-12-31'\n parameter : str\n The parameter code to get, e.g. ('00060') discharge, cubic feet per second\n cache_dir : str\n Directory where files retrieved from the web service are cached.\n If set to None then USGS_DATA_HOME env var will be used as cache directory.\n\n Examples\n --------\n >>> from ewatercycle.observation.usgs import get_usgs_data\n >>> data = get_usgs_data('03109500', '2000-01-01', '2000-12-31', cache_dir='.')\n >>> data\n <xarray.Dataset>\n Dimensions: (time: 8032)\n Coordinates:\n * time (time) datetime64[ns] 2000-01-04T05:00:00 ... 2000-12-23T04:00:00\n Data variables:\n Streamflow (time) float32 8.296758 10.420501 ... 10.647034 11.694747\n Attributes:\n title: USGS Data from streamflow data\n station: Little Beaver Creek near East Liverpool OH\n stationid: 03109500\n location: (40.6758974, -80.5406244)\n "
if (cache_dir is None):
cache_dir = os.environ['USGS_DATA_HOME']
netcdf = os.path.join(cache_dir, (((((((('USGS_' + station_id) + '_') + parameter) + '_') + start_date) + '_') + end_date) + '.nc'))
if os.path.exists(netcdf):
return xr.open_dataset(netcdf)
out = os.path.join(cache_dir, (((((((('USGS_' + station_id) + '_') + parameter) + '_') + start_date) + '_') + end_date) + '.wml'))
if (not os.path.exists(out)):
collector = UsgsRest()
collector.filter(start=datetime.strptime(start_date, '%Y-%m-%d'), end=datetime.strptime(end_date, '%Y-%m-%d'), variables=[parameter], features=[station_id])
data = collector.raw()
with open(out, 'w') as file:
file.write(data)
collector.clear()
else:
with open(out, 'r') as file:
data = file.read()
data = WaterML11ToPaegan(data).feature
if (len(data.elements) == 0):
raise ValueError('Data does not contain any station data')
else:
station = data.elements[0]
values = np.array([(float(point.members[0]['value']) / 35.315) for point in station.elements], dtype=np.float32)
times = [point.time for point in station.elements]
attrs = {'units': 'cubic meters per second'}
ds = xr.Dataset({'streamflow': (['time'], values, attrs)}, coords={'time': times})
ds.attrs['title'] = 'USGS Data from streamflow data'
ds.attrs['station'] = station.name
ds.attrs['stationid'] = station.get_uid()
ds.attrs['location'] = (station.location.y, station.location.x)
ds.to_netcdf(netcdf)
return ds | 8,764,079,998,482,199,000 | Get river discharge data from the USGS REST web service.
See `U.S. Geological Survey Water Services
<https://waterservices.usgs.gov/>`_ (USGS)
Parameters
----------
station_id : str
The station id to get
start_date : str
String for start date in the format: 'YYYY-MM-dd', e.g. '1980-01-01'
end_date : str
String for start date in the format: 'YYYY-MM-dd', e.g. '2018-12-31'
parameter : str
The parameter code to get, e.g. ('00060') discharge, cubic feet per second
cache_dir : str
Directory where files retrieved from the web service are cached.
If set to None then USGS_DATA_HOME env var will be used as cache directory.
Examples
--------
>>> from ewatercycle.observation.usgs import get_usgs_data
>>> data = get_usgs_data('03109500', '2000-01-01', '2000-12-31', cache_dir='.')
>>> data
<xarray.Dataset>
Dimensions: (time: 8032)
Coordinates:
* time (time) datetime64[ns] 2000-01-04T05:00:00 ... 2000-12-23T04:00:00
Data variables:
streamflow (time) float32 8.296758 10.420501 ... 10.647034 11.694747
Attributes:
title: USGS Data from streamflow data
station: Little Beaver Creek near East Liverpool OH
stationid: 03109500
location: (40.6758974, -80.5406244) | src/ewatercycle/observation/usgs.py | get_usgs_data | cffbots/ewatercycle | python | def get_usgs_data(station_id, start_date, end_date, parameter='00060', cache_dir=None):
"Get river discharge data from the USGS REST web service.\n\n See `U.S. Geological Survey Water Services\n <https://waterservices.usgs.gov/>`_ (USGS)\n\n Parameters\n ----------\n station_id : str\n The station id to get\n start_date : str\n String for start date in the format: 'YYYY-MM-dd', e.g. '1980-01-01'\n end_date : str\n String for start date in the format: 'YYYY-MM-dd', e.g. '2018-12-31'\n parameter : str\n The parameter code to get, e.g. ('00060') discharge, cubic feet per second\n cache_dir : str\n Directory where files retrieved from the web service are cached.\n If set to None then USGS_DATA_HOME env var will be used as cache directory.\n\n Examples\n --------\n >>> from ewatercycle.observation.usgs import get_usgs_data\n >>> data = get_usgs_data('03109500', '2000-01-01', '2000-12-31', cache_dir='.')\n >>> data\n <xarray.Dataset>\n Dimensions: (time: 8032)\n Coordinates:\n * time (time) datetime64[ns] 2000-01-04T05:00:00 ... 2000-12-23T04:00:00\n Data variables:\n Streamflow (time) float32 8.296758 10.420501 ... 10.647034 11.694747\n Attributes:\n title: USGS Data from streamflow data\n station: Little Beaver Creek near East Liverpool OH\n stationid: 03109500\n location: (40.6758974, -80.5406244)\n "
if (cache_dir is None):
cache_dir = os.environ['USGS_DATA_HOME']
netcdf = os.path.join(cache_dir, (((((((('USGS_' + station_id) + '_') + parameter) + '_') + start_date) + '_') + end_date) + '.nc'))
if os.path.exists(netcdf):
return xr.open_dataset(netcdf)
out = os.path.join(cache_dir, (((((((('USGS_' + station_id) + '_') + parameter) + '_') + start_date) + '_') + end_date) + '.wml'))
if (not os.path.exists(out)):
collector = UsgsRest()
collector.filter(start=datetime.strptime(start_date, '%Y-%m-%d'), end=datetime.strptime(end_date, '%Y-%m-%d'), variables=[parameter], features=[station_id])
data = collector.raw()
with open(out, 'w') as file:
file.write(data)
collector.clear()
else:
with open(out, 'r') as file:
data = file.read()
data = WaterML11ToPaegan(data).feature
if (len(data.elements) == 0):
raise ValueError('Data does not contain any station data')
else:
station = data.elements[0]
values = np.array([(float(point.members[0]['value']) / 35.315) for point in station.elements], dtype=np.float32)
times = [point.time for point in station.elements]
attrs = {'units': 'cubic meters per second'}
ds = xr.Dataset({'streamflow': (['time'], values, attrs)}, coords={'time': times})
ds.attrs['title'] = 'USGS Data from streamflow data'
ds.attrs['station'] = station.name
ds.attrs['stationid'] = station.get_uid()
ds.attrs['location'] = (station.location.y, station.location.x)
ds.to_netcdf(netcdf)
return ds |
def create_token(self, body, **kwargs):
'Create token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_token(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1Token body: Token body (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1Token\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.create_token_with_http_info(body, **kwargs) | -8,848,656,419,021,497,000 | Create token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_token(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | create_token | deeplearning2012/polyaxon | python | def create_token(self, body, **kwargs):
'Create token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_token(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1Token body: Token body (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1Token\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.create_token_with_http_info(body, **kwargs) |
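A hedged usage sketch for create_token following generated-OpenAPI-client conventions; the host, token value, and the V1Token field name are placeholders, not verified against the real API.

import polyaxon_sdk

configuration = polyaxon_sdk.Configuration(host='http://localhost:8000')  # placeholder host
configuration.api_key['ApiKey'] = 'YOUR_API_TOKEN'                        # matches auth_settings above
with polyaxon_sdk.ApiClient(configuration) as api_client:
    api = polyaxon_sdk.UsersV1Api(api_client)
    body = polyaxon_sdk.V1Token(name='ci-token')   # field name assumed
    token = api.create_token(body=body)
    print(token)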
def create_token_with_http_info(self, body, **kwargs):
'Create token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_token_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1Token body: Token body (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method create_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `create_token`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Token', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | 8,630,615,746,013,322,000 | Create token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_token_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1Token body: Token body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | create_token_with_http_info | deeplearning2012/polyaxon | python | def create_token_with_http_info(self, body, **kwargs):
'Create token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_token_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1Token body: Token body (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method create_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `create_token`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Token', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
def delete_token(self, uuid, **kwargs):
'Delete token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_token(uuid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str uuid: UUid of the namespace (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.delete_token_with_http_info(uuid, **kwargs) | -4,694,829,903,950,469,000 | Delete token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_token(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: UUID of the namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | delete_token | deeplearning2012/polyaxon | python | def delete_token(self, uuid, **kwargs):
'Delete token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_token(uuid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str uuid: UUid of the namespace (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.delete_token_with_http_info(uuid, **kwargs) |
def delete_token_with_http_info(self, uuid, **kwargs):
'Delete token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_token_with_http_info(uuid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str uuid: UUid of the namespace (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['uuid']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method delete_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('uuid' not in local_var_params) or (local_var_params['uuid'] is None))):
raise ApiValueError('Missing the required parameter `uuid` when calling `delete_token`')
collection_formats = {}
path_params = {}
if ('uuid' in local_var_params):
path_params['uuid'] = local_var_params['uuid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens/{uuid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | 3,539,230,458,003,127,300 | Delete token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_token_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: UUID of the namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | delete_token_with_http_info | deeplearning2012/polyaxon | python | def delete_token_with_http_info(self, uuid, **kwargs):
'Delete token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_token_with_http_info(uuid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str uuid: UUid of the namespace (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['uuid']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method delete_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('uuid' not in local_var_params) or (local_var_params['uuid'] is None))):
raise ApiValueError('Missing the required parameter `uuid` when calling `delete_token`')
collection_formats = {}
path_params = {}
if ('uuid' in local_var_params):
path_params['uuid'] = local_var_params['uuid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens/{uuid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
def get_token(self, uuid, **kwargs):
'Get token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_token(uuid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str uuid: UUid of the namespace (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1Token\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.get_token_with_http_info(uuid, **kwargs) | -8,607,238,751,932,570,000 | Get token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_token(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: UUID of the namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | get_token | deeplearning2012/polyaxon | python | def get_token(self, uuid, **kwargs):
'Get token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_token(uuid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str uuid: UUid of the namespace (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1Token\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.get_token_with_http_info(uuid, **kwargs) |
def get_token_with_http_info(self, uuid, **kwargs):
'Get token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_token_with_http_info(uuid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str uuid: UUid of the namespace (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['uuid']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method get_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('uuid' not in local_var_params) or (local_var_params['uuid'] is None))):
raise ApiValueError('Missing the required parameter `uuid` when calling `get_token`')
collection_formats = {}
path_params = {}
if ('uuid' in local_var_params):
path_params['uuid'] = local_var_params['uuid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Token', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | -2,203,539,906,283,325,000 | Get token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_token_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: UUID of the namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | get_token_with_http_info | deeplearning2012/polyaxon | python | def get_token_with_http_info(self, uuid, **kwargs):
'Get token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_token_with_http_info(uuid, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str uuid: UUID of the namespace (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['uuid']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method get_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('uuid' not in local_var_params) or (local_var_params['uuid'] is None))):
raise ApiValueError('Missing the required parameter `uuid` when calling `get_token`')
collection_formats = {}
path_params = {}
if ('uuid' in local_var_params):
path_params['uuid'] = local_var_params['uuid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Token', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
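The two rows above form the generated wrapper/`_with_http_info` pair for fetching a token. A minimal usage sketch (not part of the dataset) follows; it assumes the usual openapi-generator client layout for this SDK (Configuration, ApiClient, UsersV1Api), and the host, API key, and UUID values are placeholders.

import polyaxon_sdk

# Assumed openapi-generator setup; host, key, and UUID are placeholders.
configuration = polyaxon_sdk.Configuration()
configuration.host = 'https://cloud.polyaxon.com'
configuration.api_key['ApiKey'] = 'YOUR_API_KEY'
configuration.api_key_prefix['ApiKey'] = 'token'
api = polyaxon_sdk.UsersV1Api(polyaxon_sdk.ApiClient(configuration))

# Synchronous call: blocks and returns a V1Token.
token = api.get_token('3c1c4f8a-0000-0000-0000-placeholder0')

# Asynchronous call: returns immediately with a thread-like handle;
# .get() joins it and yields the V1Token result.
thread = api.get_token('3c1c4f8a-0000-0000-0000-placeholder0', async_req=True)
token = thread.get()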
def get_user(self, **kwargs):
'Get current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1User\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.get_user_with_http_info(**kwargs) | -753,648,831,554,459,300 | Get current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | get_user | deeplearning2012/polyaxon | python | def get_user(self, **kwargs):
'Get current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1User\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.get_user_with_http_info(**kwargs) |
def get_user_with_http_info(self, **kwargs):
'Get current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = []
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method get_user" % key))
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1User', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | 2,460,918,540,667,456,000 | Get current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | get_user_with_http_info | deeplearning2012/polyaxon | python | def get_user_with_http_info(self, **kwargs):
'Get current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = []
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method get_user" % key))
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1User', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
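A short sketch of the `_with_http_info` and `_preload_content` behaviors described in the row above; the client setup is the same as in the earlier sketch and is repeated only so the snippet runs on its own.

import polyaxon_sdk

configuration = polyaxon_sdk.Configuration()  # configured as in the earlier sketch
api = polyaxon_sdk.UsersV1Api(polyaxon_sdk.ApiClient(configuration))

# The *_with_http_info variant returns (data, status_code, headers).
(user, status, headers) = api.get_user_with_http_info()

# With _preload_content=False the raw response object is returned
# instead of a deserialized V1User, per the docstring above.
raw = api.get_user(_preload_content=False)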
def list_tokens(self, **kwargs):
'List tokens # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.list_tokens(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param int offset: Pagination offset.\n :param int limit: Limit size.\n :param str sort: Sort to order the search.\n :param str query: Query filter the search.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1ListTokenResponse\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.list_tokens_with_http_info(**kwargs) | -2,749,900,521,780,329,000 | List tokens # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tokens(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListTokenResponse
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | list_tokens | deeplearning2012/polyaxon | python | def list_tokens(self, **kwargs):
'List tokens # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.list_tokens(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param int offset: Pagination offset.\n :param int limit: Limit size.\n :param str sort: Sort to order the search.\n :param str query: Query filter the search.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1ListTokenResponse\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.list_tokens_with_http_info(**kwargs) |
def list_tokens_with_http_info(self, **kwargs):
'List tokens # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.list_tokens_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param int offset: Pagination offset.\n :param int limit: Limit size.\n :param str sort: Sort to order the search.\n :param str query: Query filter the search.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1ListTokenResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['offset', 'limit', 'sort', 'query']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method list_tokens" % key))
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if (('offset' in local_var_params) and (local_var_params['offset'] is not None)):
query_params.append(('offset', local_var_params['offset']))
if (('limit' in local_var_params) and (local_var_params['limit'] is not None)):
query_params.append(('limit', local_var_params['limit']))
if (('sort' in local_var_params) and (local_var_params['sort'] is not None)):
query_params.append(('sort', local_var_params['sort']))
if (('query' in local_var_params) and (local_var_params['query'] is not None)):
query_params.append(('query', local_var_params['query']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1ListTokenResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | 365,656,943,147,210,240 | List tokens # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tokens_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListTokenResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | list_tokens_with_http_info | deeplearning2012/polyaxon | python | def list_tokens_with_http_info(self, **kwargs):
'List tokens # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.list_tokens_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param int offset: Pagination offset.\n :param int limit: Limit size.\n :param str sort: Sort to order the search.\n :param str query: Query filter the search search.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1ListTokenResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['offset', 'limit', 'sort', 'query']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method list_tokens" % key))
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if (('offset' in local_var_params) and (local_var_params['offset'] is not None)):
query_params.append(('offset', local_var_params['offset']))
if (('limit' in local_var_params) and (local_var_params['limit'] is not None)):
query_params.append(('limit', local_var_params['limit']))
if (('sort' in local_var_params) and (local_var_params['sort'] is not None)):
query_params.append(('sort', local_var_params['sort']))
if (('query' in local_var_params) and (local_var_params['query'] is not None)):
query_params.append(('query', local_var_params['query']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1ListTokenResponse', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
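The offset/limit/sort/query parameters in the rows above map directly onto query-string parameters. A hedged paging sketch follows; the '-created_at' sort expression and the `results` attribute on V1ListTokenResponse are assumptions based on typical Polyaxon list payloads, not guarantees from this file.

import polyaxon_sdk

configuration = polyaxon_sdk.Configuration()  # configured as in the earlier sketch
api = polyaxon_sdk.UsersV1Api(polyaxon_sdk.ApiClient(configuration))

# Walk the token list 20 at a time; sort string and .results are assumed.
offset = 0
while True:
    page = api.list_tokens(offset=offset, limit=20, sort='-created_at')
    for token in (page.results or []):
        print(token.uuid)
    if (not page.results) or (len(page.results) < 20):
        break
    offset += 20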
def patch_token(self, token_uuid, body, **kwargs):
'Patch token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_token(token_uuid, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str token_uuid: UUID (required)\n :param V1Token body: Token body (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1Token\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.patch_token_with_http_info(token_uuid, body, **kwargs) | 9,195,283,307,072,790,000 | Patch token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_token(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | patch_token | deeplearning2012/polyaxon | python | def patch_token(self, token_uuid, body, **kwargs):
'Patch token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_token(token_uuid, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str token_uuid: UUID (required)\n :param V1Token body: Token body (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1Token\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.patch_token_with_http_info(token_uuid, body, **kwargs) |
def patch_token_with_http_info(self, token_uuid, body, **kwargs):
'Patch token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_token_with_http_info(token_uuid, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str token_uuid: UUID (required)\n :param V1Token body: Token body (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['token_uuid', 'body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method patch_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('token_uuid' not in local_var_params) or (local_var_params['token_uuid'] is None))):
raise ApiValueError('Missing the required parameter `token_uuid` when calling `patch_token`')
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `patch_token`')
collection_formats = {}
path_params = {}
if ('token_uuid' in local_var_params):
path_params['token.uuid'] = local_var_params['token_uuid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens/{token.uuid}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Token', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | 1,879,753,834,479,708,200 | Patch token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_token_with_http_info(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | patch_token_with_http_info | deeplearning2012/polyaxon | python | def patch_token_with_http_info(self, token_uuid, body, **kwargs):
'Patch token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_token_with_http_info(token_uuid, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str token_uuid: UUID (required)\n :param V1Token body: Token body (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['token_uuid', 'body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method patch_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('token_uuid' not in local_var_params) or (local_var_params['token_uuid'] is None))):
raise ApiValueError('Missing the required parameter `token_uuid` when calling `patch_token`')
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `patch_token`')
collection_formats = {}
path_params = {}
if ('token_uuid' in local_var_params):
path_params['token.uuid'] = local_var_params['token_uuid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens/{token.uuid}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Token', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
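A sketch of the PATCH flow above: build a sparse V1Token carrying only the fields to change and send it with the token UUID. The `name` field on V1Token is assumed here purely for illustration.

import polyaxon_sdk

configuration = polyaxon_sdk.Configuration()  # configured as in the earlier sketch
api = polyaxon_sdk.UsersV1Api(polyaxon_sdk.ApiClient(configuration))

# Sparse body with only the fields to update; 'name' is an assumed field.
patch_body = polyaxon_sdk.V1Token(name='ci-token')
updated = api.patch_token('3c1c4f8a-0000-0000-0000-placeholder0', patch_body)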
def patch_user(self, body, **kwargs):
'Patch current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_user(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1User body: (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1User\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.patch_user_with_http_info(body, **kwargs) | 6,286,542,450,307,694,000 | Patch current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_user(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | patch_user | deeplearning2012/polyaxon | python | def patch_user(self, body, **kwargs):
'Patch current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_user(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1User body: (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1User\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.patch_user_with_http_info(body, **kwargs) |
def patch_user_with_http_info(self, body, **kwargs):
'Patch current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_user_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1User body: (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method patch_user" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `patch_user`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1User', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | 8,619,010,782,155,876,000 | Patch current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_user_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | patch_user_with_http_info | deeplearning2012/polyaxon | python | def patch_user_with_http_info(self, body, **kwargs):
'Patch current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_user_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1User body: (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method patch_user" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `patch_user`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1User', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
def update_token(self, token_uuid, body, **kwargs):
'Update token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_token(token_uuid, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str token_uuid: UUID (required)\n :param V1Token body: Token body (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1Token\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.update_token_with_http_info(token_uuid, body, **kwargs) | 287,668,840,200,222,080 | Update token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_token(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | update_token | deeplearning2012/polyaxon | python | def update_token(self, token_uuid, body, **kwargs):
'Update token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_token(token_uuid, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str token_uuid: UUID (required)\n :param V1Token body: Token body (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1Token\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.update_token_with_http_info(token_uuid, body, **kwargs) |
def update_token_with_http_info(self, token_uuid, body, **kwargs):
'Update token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_token_with_http_info(token_uuid, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str token_uuid: UUID (required)\n :param V1Token body: Token body (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['token_uuid', 'body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method update_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('token_uuid' not in local_var_params) or (local_var_params['token_uuid'] is None))):
raise ApiValueError('Missing the required parameter `token_uuid` when calling `update_token`')
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `update_token`')
collection_formats = {}
path_params = {}
if ('token_uuid' in local_var_params):
path_params['token.uuid'] = local_var_params['token_uuid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens/{token.uuid}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Token', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | 4,044,100,548,066,564,600 | Update token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_token_with_http_info(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | update_token_with_http_info | deeplearning2012/polyaxon | python | def update_token_with_http_info(self, token_uuid, body, **kwargs):
'Update token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_token_with_http_info(token_uuid, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str token_uuid: UUID (required)\n :param V1Token body: Token body (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['token_uuid', 'body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method update_token" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('token_uuid' not in local_var_params) or (local_var_params['token_uuid'] is None))):
raise ApiValueError('Missing the required parameter `token_uuid` when calling `update_token`')
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `update_token`')
collection_formats = {}
path_params = {}
if ('token_uuid' in local_var_params):
path_params['token.uuid'] = local_var_params['token_uuid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users/tokens/{token.uuid}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Token', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
def update_user(self, body, **kwargs):
'Update current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_user(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1User body: (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1User\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.update_user_with_http_info(body, **kwargs) | 7,695,697,686,336,718,000 | Update current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | update_user | deeplearning2012/polyaxon | python | def update_user(self, body, **kwargs):
'Update current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_user(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1User body: (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: V1User\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.update_user_with_http_info(body, **kwargs) |
def update_user_with_http_info(self, body, **kwargs):
'Update current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_user_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1User body: (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method update_user" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `update_user`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1User', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | -1,182,087,638,114,787,600 | Update current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | update_user_with_http_info | deeplearning2012/polyaxon | python | def update_user_with_http_info(self, body, **kwargs):
'Update current user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_user_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param V1User body: (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['body']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method update_user" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('body' not in local_var_params) or (local_var_params['body'] is None))):
raise ApiValueError('Missing the required parameter `body` when calling `update_user`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in local_var_params):
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['ApiKey']
return self.api_client.call_api('/api/v1/users', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1User', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
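A hedged usage sketch for the generated method above, following the sync/async pattern its own docstring shows. The client wiring (`Configuration`, `ApiClient`, `UsersV1Api`, `V1User`) follows the usual OpenAPI-generated layout and is assumed here rather than verified against the installed package:

import polyaxon_sdk

configuration = polyaxon_sdk.Configuration(host='https://cloud.polyaxon.com')  # assumed host
configuration.api_key['ApiKey'] = 'token <your-token>'  # matches auth_settings above

with polyaxon_sdk.ApiClient(configuration) as api_client:
    api = polyaxon_sdk.UsersV1Api(api_client)
    body = polyaxon_sdk.V1User(email='user@example.com')  # illustrative payload

    # Synchronous call: returns (V1User, status_code, headers)
    user, status, headers = api.update_user_with_http_info(body)

    # Asynchronous call, as in the docstring: returns a thread
    thread = api.update_user_with_http_info(body, async_req=True)
    user, status, headers = thread.get()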
@commands.command()
@ext.long_help('A fun command to generate a wheres waldo effect in discord, see if you can find him first!Optionally takes a size parameter to make it easier or harder')
@ext.short_help('Can you find him?')
@ext.example(('waldo', 'waldo 10'))
async def waldo(self, ctx, size=MAX_WALDO_GRID_SIZE):
"\n Play Where's Waldo!\n\n Usage: <prefix>waldo [size = 100]\n "
random_start_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'X', 'Y', 'Z']
max_waldo_line_size = 6
new_line_waldo_chance = 10
msg = ''
count = 0
place = random.randint(0, size)
for i in range((size + 1)):
if (i == place):
msg += '||`WALDO`|| '
count += 1
else:
helper = random.randint(0, (len(random_start_letters) - 1))
letter = random_start_letters[helper]
msg += f'||`{letter}ALDO`|| '
count += 1
new_line = random.randint(0, 100)
if ((new_line < new_line_waldo_chance) or (count > max_waldo_line_size)):
msg += '\n'
count = 0
(await ctx.send(msg)) | -2,751,101,128,740,316,000 | Play Where's Waldo!
Usage: <prefix>waldo [size = 100] | bot/cogs/memes_cog/memes_cog.py | waldo | Cavesprout/ClemBot | python | @commands.command()
@ext.long_help('A fun command to generate a wheres waldo effect in discord, see if you can find him first!Optionally takes a size parameter to make it easier or harder')
@ext.short_help('Can you find him?')
@ext.example(('waldo', 'waldo 10'))
async def waldo(self, ctx, size=MAX_WALDO_GRID_SIZE):
"\n Play Where's Waldo!\n\n Usage: <prefix>waldo [size = 100]\n "
random_start_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'X', 'Y', 'Z']
max_waldo_line_size = 6
new_line_waldo_chance = 10
msg = ''
count = 0
place = random.randint(0, size)
for i in range((size + 1)):
if (i == place):
msg += '||`WALDO`|| '
count += 1
else:
helper = random.randint(0, (len(random_start_letters) - 1))
letter = random_start_letters[helper]
msg += f'||`{letter}ALDO`|| '
count += 1
new_line = random.randint(0, 100)
if ((new_line < new_line_waldo_chance) or (count > max_waldo_line_size)):
msg += '\n'
count = 0
(await ctx.send(msg)) |
@ext.command()
@ext.chainable()
@ext.long_help('A fun command to spongebob meme text in discord')
@ext.short_help('sO yOu doNt KnOw wHat tHiS Is?')
@ext.example('spongebob hello world')
async def spongebob(self, ctx, *, args):
'\n Spongebob Text\n '
random.seed(time.time())
args = args.replace('"', "'")
result = ''
for i in args:
helper = random.randint(0, 100)
if (helper > 60):
result += str(i).upper()
else:
result += str(i).lower()
(await ctx.send(result)) | -1,629,035,964,380,109,600 | Spongebob Text | bot/cogs/memes_cog/memes_cog.py | spongebob | Cavesprout/ClemBot | python | @ext.command()
@ext.chainable()
@ext.long_help('A fun command to spongebob meme text in discord')
@ext.short_help('sO yOu doNt KnOw wHat tHiS Is?')
@ext.example('spongebob hello world')
async def spongebob(self, ctx, *, args):
'\n \n '
random.seed(time.time())
args = args.replace('"', "'")
result = ''
for i in args:
helper = random.randint(0, 100)
if (helper > 60):
result += str(i).upper()
else:
result += str(i).lower()
(await ctx.send(result)) |
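The character loop above can be collapsed into a single comprehension; a minimal equivalent sketch (the `> 60` draw uppercases roughly 40% of characters, matching the original command):

import random

def spongebob_case(text: str) -> str:
    # Uppercase a character when the 0-100 draw exceeds 60, else lowercase,
    # exactly as the loop in the command above does.
    return ''.join(c.upper() if random.randint(0, 100) > 60 else c.lower()
                   for c in text)

print(spongebob_case('so you dont know what this is?'))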
@ext.command(aliases=['rave', '🦀'])
@commands.cooldown(1, CRAB_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help('A fun command to generate a crab rave gif with specified text overlay')
@ext.short_help('Generates a crab rave gif')
@ext.chainable_input()
@ext.example('crab hello from crab world')
async def crab(self, ctx, is_rave: t.Optional[bool]=True, *, args='Bottom text\n is dead'):
'\n Create your own crab rave.\n Usage: <prefix>crab [is_rave=True] [text=Bottom text\\n is dead]\n Aliases: rave, 🦀\n '
timestamp = datetime.datetime.utcnow().microsecond
wait_msg = (await ctx.send('Generating your gif'))
args = args.replace('\\', '')
lines_in_text = 1
while (len(args) > (CRAB_LINE_LENGTH * lines_in_text)):
newline_loc = (CRAB_LINE_LENGTH * lines_in_text)
while (not args[newline_loc].isspace()):
newline_loc -= 1
if (newline_loc == (CRAB_LINE_LENGTH * (lines_in_text - 1))):
newline_loc = (CRAB_LINE_LENGTH * lines_in_text)
break
args = f'''{args[:newline_loc]}
{args[newline_loc:]}'''
lines_in_text += 1
loop = self.bot.loop
with concurrent.futures.ProcessPoolExecutor() as pool:
pil_args = (args, is_rave, lines_in_text, timestamp)
(await loop.run_in_executor(pool, pillow_process, *pil_args))
attachment = discord.File(filename=f'out_{timestamp}.gif', fp=f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
msg = (await ctx.send(file=attachment))
(await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author))
(await wait_msg.delete())
os.remove(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif') | 3,072,281,457,158,416,000 | Create your own crab rave.
Usage: <prefix>crab [is_rave=True] [text=Bottom text\n is dead]
Aliases: rave, 🦀 | bot/cogs/memes_cog/memes_cog.py | crab | Cavesprout/ClemBot | python | @ext.command(aliases=['rave', '🦀'])
@commands.cooldown(1, CRAB_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help('A fun command to generate a crab rave gif with specified text overlay')
@ext.short_help('Generates a crab rave gif')
@ext.chainable_input()
@ext.example('crab hello from crab world')
async def crab(self, ctx, is_rave: t.Optional[bool]=True, *, args='Bottom text\n is dead'):
'\n Create your own crab rave.\n Usage: <prefix>crab [is_rave=True] [text=Bottom text\\n is dead]\n Aliases: rave, 🦀\n '
timestamp = datetime.datetime.utcnow().microsecond
wait_msg = (await ctx.send('Generating your gif'))
args = args.replace('\\', '')
lines_in_text = 1
while (len(args) > (CRAB_LINE_LENGTH * lines_in_text)):
newline_loc = (CRAB_LINE_LENGTH * lines_in_text)
while (not args[newline_loc].isspace()):
newline_loc -= 1
if (newline_loc == (CRAB_LINE_LENGTH * (lines_in_text - 1))):
newline_loc = (CRAB_LINE_LENGTH * lines_in_text)
break
args = f'''{args[:newline_loc]}
{args[newline_loc:]}'''
lines_in_text += 1
loop = self.bot.loop
with concurrent.futures.ProcessPoolExecutor() as pool:
pil_args = (args, is_rave, lines_in_text, timestamp)
(await loop.run_in_executor(pool, pillow_process, *pil_args))
attachment = discord.File(filename=f'out_{timestamp}.gif', fp=f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
msg = (await ctx.send(file=attachment))
(await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author))
(await wait_msg.delete())
os.remove(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif') |
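The wrapping loop in `crab` scans backwards from the width limit to the nearest space and gives up on unbreakable words; the standard library's `textwrap` provides the same whitespace-aware wrapping. A sketch of a drop-in alternative — `CRAB_LINE_LENGTH` is defined elsewhere in the module, so the value below is a placeholder assumption:

import textwrap

CRAB_LINE_LENGTH = 30  # placeholder; the real constant is not shown in this row

def wrap_crab_text(args: str) -> tuple:
    # Break on whitespace at the width limit; unlike the manual loop,
    # textwrap also hard-splits single words longer than the limit.
    lines = textwrap.wrap(args, width=CRAB_LINE_LENGTH) or ['']
    return '\n'.join(lines), len(lines)

wrapped, lines_in_text = wrap_crab_text('Bottom text is dead ' * 5)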
@ext.command(hidden=True, aliases=['ctray', 'trayforjay'])
async def cookouttray(self, ctx, input):
'\n For those who do finances with cookout trays, we proudly present the command for you\n Simply type one of the following:\n cookouttray\n ctray\n trayforjay\n\n Followed by a monetary value such as (leave off the dollar sign):\n 20\n 100\n 3.14\n\n To have it converted into cookout trays\n Examples:\n cookouttray 20\n ctray 100\n trayforjay 3.14\n \n Clicking the link "Cash to Cookout Tray Converter" in the output will also take you to cookout\'s website\n '
money = round(float(input), 2)
output = (money / 5)
embed = discord.Embed(title='Cash to Cookout Tray Converter', description=f'{ctx.message.author.mention} ${money} is approximately {output} cookout trays', url=f'https://www.fastfoodmenuprices.com/cookout-prices/', color=Colors.ClemsonOrange)
(await ctx.send(embed=embed)) | -5,427,305,081,996,591,000 | For those who do finances with cookout trays, we proudly present the command for you
Simply type one of the following:
cookouttray
ctray
trayforjay
Followed by a monetary value such as (leave off the dollar sign):
20
100
3.14
To have it converted into cookout trays
Examples:
cookouttray 20
ctray 100
trayforjay 3.14
Clicking the link "Cash to Cookout Tray Converter" in the output will also take you to cookout's website | bot/cogs/memes_cog/memes_cog.py | cookouttray | Cavesprout/ClemBot | python | @ext.command(hidden=True, aliases=['ctray', 'trayforjay'])
async def cookouttray(self, ctx, input):
'\n For those who do finances with cookout trays, we proudly present the command for you\n Simply type one of the following:\n cookouttray\n ctray\n trayforjay\n\n Followed by a monetary value such as (leave off the dollar sign):\n 20\n 100\n 3.14\n\n To have it converted into cookout trays\n Examples:\n cookouttray 20\n ctray 100\n trayforjay 3.14\n \n Clicking the link "Cash to Cookout Tray Converter" in the output will also take you to cookout\'s website\n '
money = round(float(input), 2)
output = (money / 5)
embed = discord.Embed(title='Cash to Cookout Tray Converter', description=f'{ctx.message.author.mention} ${money} is approximately {output} cookout trays', url=f'https://www.fastfoodmenuprices.com/cookout-prices/', color=Colors.ClemsonOrange)
(await ctx.send(embed=embed)) |
def _invalid_headers(self, url, headers):
'\n Verify whether the provided metadata in the URL is also present in the headers\n :param url: .../file.txt&content-type=app%2Fjson&Signature=..\n :param headers: Content-Type=app/json\n :return: True or False\n '
metadata_to_check = {'content-disposition': 'Content-Disposition', 'content-encoding': 'Content-Encoding', 'content-language': 'Content-Language', 'content-length': 'Content-Length', 'content-md5': 'Content-MD5', 'content-type': 'Content-Type'}
for (url_key, header_key) in metadata_to_check.items():
metadata_in_url = re.search((url_key + '=(.+?)(&.+$|$)'), url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if ((header_key not in headers) or (url_value != headers[header_key])):
return True
return False | 350,650,712,124,656,700 | Verify whether the provided metadata in the URL is also present in the headers
:param url: .../file.txt&content-type=app%2Fjson&Signature=..
:param headers: Content-Type=app/json
:return: True or False | moto/s3/responses.py | _invalid_headers | nom3ad/moto | python | def _invalid_headers(self, url, headers):
'\n Verify whether the provided metadata in the URL is also present in the headers\n :param url: .../file.txt&content-type=app%2Fjson&Signature=..\n :param headers: Content-Type=app/json\n :return: True or False\n '
metadata_to_check = {'content-disposition': 'Content-Disposition', 'content-encoding': 'Content-Encoding', 'content-language': 'Content-Language', 'content-length': 'Content-Length', 'content-md5': 'Content-MD5', 'content-type': 'Content-Type'}
for (url_key, header_key) in metadata_to_check.items():
metadata_in_url = re.search((url_key + '=(.+?)(&.+$|$)'), url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if ((header_key not in headers) or (url_value != headers[header_key])):
return True
return False |
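A standalone worked example of the check above, using the URL shape from its docstring; `self` is dropped so the sketch runs as-is:

import re
from urllib.parse import unquote

def invalid_headers(url, headers):
    # Verbatim logic from the method above, minus `self`.
    metadata_to_check = {'content-disposition': 'Content-Disposition', 'content-encoding': 'Content-Encoding', 'content-language': 'Content-Language', 'content-length': 'Content-Length', 'content-md5': 'Content-MD5', 'content-type': 'Content-Type'}
    for url_key, header_key in metadata_to_check.items():
        metadata_in_url = re.search(url_key + '=(.+?)(&.+$|$)', url)
        if metadata_in_url:
            url_value = unquote(metadata_in_url.group(1))
            if header_key not in headers or url_value != headers[header_key]:
                return True
    return False

# The URL promises content-type=app/json; the headers disagree, so it is invalid.
print(invalid_headers('/file.txt?content-type=app%2Fjson&Signature=x', {'Content-Type': 'text/plain'}))  # True
print(invalid_headers('/file.txt?content-type=app%2Fjson&Signature=x', {'Content-Type': 'app/json'}))    # False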
def syncify(*types):
"\n Converts all the methods in the given types (class definitions)\n into synchronous, which return either the coroutine or the result\n based on whether ``asyncio's`` event loop is running.\n "
for t in types:
for name in dir(t):
if ((not name.startswith('_')) or (name == '__call__')):
if inspect.iscoroutinefunction(getattr(t, name)):
_syncify_wrap(t, name) | 6,797,930,550,718,388,000 | Converts all the methods in the given types (class definitions)
into synchronous, which return either the coroutine or the result
based on whether ``asyncio's`` event loop is running. | telethon/sync.py | syncify | SlavikMIPT/Telethon | python | def syncify(*types):
"\n Converts all the methods in the given types (class definitions)\n into synchronous, which return either the coroutine or the result\n based on whether ``asyncio's`` event loop is running.\n "
for t in types:
for name in dir(t):
if ((not name.startswith('_')) or (name == '__call__')):
if inspect.iscoroutinefunction(getattr(t, name)):
_syncify_wrap(t, name) |
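`_syncify_wrap` is referenced above but not included in this row; below is a plausible reconstruction of what such a wrapper does. This is an assumption about Telethon's internals, not the library's actual code:

import asyncio
import functools

def _syncify_wrap(t, name):
    # Hypothetical sketch: swap the coroutine method for a wrapper that runs
    # it to completion when no event loop is active, and hands the coroutine
    # back untouched when one is (so async callers can still await it).
    method = getattr(t, name)

    @functools.wraps(method)
    def syncified(*args, **kwargs):
        coro = method(*args, **kwargs)
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            return asyncio.run(coro)  # no running loop: execute synchronously
        return coro  # loop already running: caller awaits the coroutine

    setattr(t, name, syncified)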
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
'Support two kinds of entities - MiCloud and Gateway.'
if ('servers' in entry.data):
return (await _setup_micloud_entry(hass, entry))
if entry.data:
hass.config_entries.async_update_entry(entry, data={}, options=entry.data)
(await _setup_logger(hass))
if (not entry.update_listeners):
entry.add_update_listener(async_update_options)
hass.data[DOMAIN][entry.entry_id] = Gateway3(**entry.options)
hass.async_create_task(_setup_domains(hass, entry))
return True | -7,320,851,031,940,415,000 | Support two kinds of entities - MiCloud and Gateway. | custom_components/xiaomi_gateway3/__init__.py | async_setup_entry | Gamma-Software/HomeAssistantConfig | python | async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
if ('servers' in entry.data):
return (await _setup_micloud_entry(hass, entry))
if entry.data:
hass.config_entries.async_update_entry(entry, data={}, options=entry.data)
(await _setup_logger(hass))
if (not entry.update_listeners):
entry.add_update_listener(async_update_options)
hass.data[DOMAIN][entry.entry_id] = Gateway3(**entry.options)
hass.async_create_task(_setup_domains(hass, entry))
return True |
async def _handle_device_remove(hass: HomeAssistant):
'Remove device from Hass and Mi Home if the device is renamed to\n `delete`.\n '
async def device_registry_updated(event: Event):
if (event.data['action'] != 'update'):
return
registry = hass.data['device_registry']
hass_device = registry.async_get(event.data['device_id'])
if ((not hass_device) or (not hass_device.identifiers)):
return
for hass_did in hass_device.identifiers:
if ((hass_did[0] == DOMAIN) and (hass_device.name_by_user == 'delete')):
break
else:
return
for gw in hass.data[DOMAIN].values():
if (not isinstance(gw, Gateway3)):
continue
gw_device = gw.get_device(hass_did[1])
if (not gw_device):
continue
if (gw_device['type'] == 'zigbee'):
gw.debug(f"Remove device: {gw_device['did']}")
(await gw.miio.send('remove_device', [gw_device['did']]))
break
registry.async_remove_device(hass_device.id)
hass.bus.async_listen('device_registry_updated', device_registry_updated) | -369,399,004,895,719,400 | Remove device from Hass and Mi Home if the device is renamed to
`delete`. | custom_components/xiaomi_gateway3/__init__.py | _handle_device_remove | Gamma-Software/HomeAssistantConfig | python | async def _handle_device_remove(hass: HomeAssistant):
'Remove device from Hass and Mi Home if the device is renamed to\n `delete`.\n '
async def device_registry_updated(event: Event):
if (event.data['action'] != 'update'):
return
registry = hass.data['device_registry']
hass_device = registry.async_get(event.data['device_id'])
if ((not hass_device) or (not hass_device.identifiers)):
return
for hass_did in hass_device.identifiers:
if ((hass_did[0] == DOMAIN) and (hass_device.name_by_user == 'delete')):
break
else:
return
for gw in hass.data[DOMAIN].values():
if (not isinstance(gw, Gateway3)):
continue
gw_device = gw.get_device(hass_did[1])
if (not gw_device):
continue
if (gw_device['type'] == 'zigbee'):
gw.debug(f"Remove device: {gw_device['did']}")
(await gw.miio.send('remove_device', [gw_device['did']]))
break
registry.async_remove_device(hass_device.id)
hass.bus.async_listen('device_registry_updated', device_registry_updated) |
async def shutdown_client(app: Application):
'\n Attempt to close the async HTTP client session.\n\n :param app: The application object\n '
logger.info('Stopping HTTP client')
try:
(await app['client'].close())
except KeyError:
pass | -6,666,553,373,865,942,000 | Attempt to close the async HTTP client session.
:param app: The application object | virtool/shutdown.py | shutdown_client | KingBain/virtool | python | async def shutdown_client(app: Application):
'\n Attempt to close the async HTTP client session.\n\n :param app: The application object\n '
logger.info('Stopping HTTP client')
try:
(await app['client'].close())
except KeyError:
pass |
async def shutdown_dispatcher(app: Application):
"\n Attempt to close the app's `Dispatcher` object.\n\n :param app: The application object\n "
logger.info('Stopping dispatcher')
try:
(await app['dispatcher'].close())
except KeyError:
pass | 1,539,764,078,448,729,900 | Attempt to close the app's `Dispatcher` object.
:param app: The application object | virtool/shutdown.py | shutdown_dispatcher | KingBain/virtool | python | async def shutdown_dispatcher(app: Application):
"\n Attempt to close the app's `Dispatcher` object.\n\n :param app: The application object\n "
logger.info('Stopping dispatcher')
try:
(await app['dispatcher'].close())
except KeyError:
pass |
async def shutdown_executors(app: Application):
'\n Attempt to close the `ThreadPoolExecutor` and `ProcessPoolExecutor`.\n\n :param app: the application object\n '
try:
app['executor'].shutdown(wait=True)
except KeyError:
pass
try:
app['process_executor'].shutdown(wait=True)
except KeyError:
pass | 8,947,247,468,802,912,000 | Attempt to close the `ThreadPoolExecutor` and `ProcessPoolExecutor`.
:param app: the application object | virtool/shutdown.py | shutdown_executors | KingBain/virtool | python | async def shutdown_executors(app: Application):
'\n Attempt to close the `ThreadPoolExecutor` and `ProcessPoolExecutor`.\n\n :param app: the application object\n '
try:
app['executor'].shutdown(wait=True)
except KeyError:
pass
try:
app['process_executor'].shutdown(wait=True)
except KeyError:
pass |
async def shutdown_scheduler(app: Application):
"\n Attempt to the close the app's `aiojobs` scheduler.\n\n :param app: The application object\n "
scheduler = get_scheduler_from_app(app)
(await scheduler.close()) | -6,032,335,249,375,110,000 | Attempt to close the app's `aiojobs` scheduler.
:param app: The application object | virtool/shutdown.py | shutdown_scheduler | KingBain/virtool | python | async def shutdown_scheduler(app: Application):
"\n Attempt to the close the app's `aiojobs` scheduler.\n\n :param app: The application object\n "
scheduler = get_scheduler_from_app(app)
(await scheduler.close()) |
async def shutdown_redis(app: Application):
"\n Attempt to close the app's `redis` instance.\n\n :param app: The application object\n "
logger.info('Closing Redis connection')
try:
app['redis'].close()
(await app['redis'].wait_closed())
except KeyError:
pass | -5,224,621,293,652,626,000 | Attempt to close the app's `redis` instance.
:param app: The application object | virtool/shutdown.py | shutdown_redis | KingBain/virtool | python | async def shutdown_redis(app: Application):
"\n Attempt to close the app's `redis` instance.\n\n :param app: The application object\n "
logger.info('Closing Redis connection')
try:
app['redis'].close()
(await app['redis'].wait_closed())
except KeyError:
pass |
async def drop_fake_postgres(app: Application):
'\n Drop a fake PostgreSQL database if the instance was run with the ``--fake`` option.\n\n :param app: the application object\n\n '
if (app['config'].fake and ('fake_' in app['config'].postgres_connection_string)):
async with app['pg'].begin() as conn:
(await conn.run_sync(Base.metadata.drop_all))
logger.debug('Dropped fake PostgreSQL database.') | -5,690,571,300,743,319,000 | Drop a fake PostgreSQL database if the instance was run with the ``--fake`` option.
:param app: the application object | virtool/shutdown.py | drop_fake_postgres | KingBain/virtool | python | async def drop_fake_postgres(app: Application):
'\n Drop a fake PostgreSQL database if the instance was run with the ``--fake`` option.\n\n :param app: the application object\n\n '
if (app['config'].fake and ('fake_' in app['config'].postgres_connection_string)):
async with app['pg'].begin() as conn:
(await conn.run_sync(Base.metadata.drop_all))
logger.debug('Dropped fake PostgreSQL database.') |
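All six helpers in this group (from virtool/shutdown.py) share the `(app)` signature aiohttp expects from cleanup callbacks; a sketch of how they would plausibly be wired up. The real registration site is not shown in these rows, so the ordering below is an assumption:

from aiohttp.web import Application
from virtool.shutdown import (drop_fake_postgres, shutdown_client, shutdown_dispatcher, shutdown_executors, shutdown_redis, shutdown_scheduler)

def register_shutdown_handlers(app: Application):
    # aiohttp runs on_shutdown callbacks in registration order when the
    # application stops.
    for handler in (shutdown_client, shutdown_dispatcher, shutdown_executors, shutdown_scheduler, shutdown_redis, drop_fake_postgres):
        app.on_shutdown.append(handler)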
@image_comparison(baseline_images=['test_plot'], extensions=['png'])
def test_plot():
'\n Test the rasters plot as multiples subplots.\n '
rasters = ['data/relatives/forest_111.tif', 'data/relatives/forest_112.tif', 'data/relatives/forest_113.tif', 'data/relatives/forest_121.tif', 'data/relatives/forest_122.tif', 'data/relatives/forest_123.tif', 'data/relatives/forest_211.tif', 'data/relatives/forest_212.tif', 'data/relatives/forest_213.tif', 'data/relatives/forest_221.tif', 'data/relatives/forest_222.tif', 'data/relatives/forest_223.tif']
title = 'Mean precipitation (mm/day)'
subtitles = ['HadGEM2 RCP4.5', 'HadGEM2 RCP8.5', 'MIROC5 RCP4.5', 'MIROC5 RCP8.5']
labels = ['2011-2040', '2041-2070', '2071-2100']
color = 'RdYlBu_r'
rows = 3
cols = 4
plots.maps(rasters, rows, cols, color, title, subtitles, labels) | -8,635,016,727,795,173,000 | Test the rasters plot as multiple subplots. | tests/test_plots.py | test_plot | rochamatcomp/python-rocha | python | @image_comparison(baseline_images=['test_plot'], extensions=['png'])
def test_plot():
'\n \n '
rasters = ['data/relatives/forest_111.tif', 'data/relatives/forest_112.tif', 'data/relatives/forest_113.tif', 'data/relatives/forest_121.tif', 'data/relatives/forest_122.tif', 'data/relatives/forest_123.tif', 'data/relatives/forest_211.tif', 'data/relatives/forest_212.tif', 'data/relatives/forest_213.tif', 'data/relatives/forest_221.tif', 'data/relatives/forest_222.tif', 'data/relatives/forest_223.tif']
title = 'Mean precipitation (mm/day)'
subtitles = ['HadGEM2 RCP4.5', 'HadGEM2 RCP8.5', 'MIROC5 RCP4.5', 'MIROC5 RCP8.5']
labels = ['2011-2040', '2041-2070', '2071-2100']
color = 'RdYlBu_r'
rows = 3
cols = 4
plots.maps(rasters, rows, cols, color, title, subtitles, labels) |
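For context on the decorator used above: `image_comparison` renders every figure the test creates and diffs it pixel-wise against a stored baseline image. A minimal standalone example, independent of the `plots` module:

import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison

@image_comparison(baseline_images=['simple_line'], extensions=['png'])
def test_simple_line():
    # The decorator compares the figure produced here against
    # baseline_images/<test module>/simple_line.png.
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])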
def convert_custom_objects(obj):
'Handles custom object lookup.\n\n Arguments:\n obj: object, dict, or list.\n\n Returns:\n The same structure, where occurrences\n of a custom object name have been replaced\n with the custom object.\n '
if isinstance(obj, list):
deserialized = []
for value in obj:
deserialized.append(convert_custom_objects(value))
return deserialized
if isinstance(obj, dict):
deserialized = {}
for (key, value) in obj.items():
deserialized[key] = convert_custom_objects(value)
return deserialized
if (obj in custom_objects):
return custom_objects[obj]
return obj | 6,778,651,313,674,850,000 | Handles custom object lookup.
Arguments:
obj: object, dict, or list.
Returns:
The same structure, where occurrences
of a custom object name have been replaced
with the custom object. | horovod/spark/keras/tensorflow.py | convert_custom_objects | HCYXAS/horovod | python | def convert_custom_objects(obj):
'Handles custom object lookup.\n\n Arguments:\n obj: object, dict, or list.\n\n Returns:\n The same structure, where occurrences\n of a custom object name have been replaced\n with the custom object.\n '
if isinstance(obj, list):
deserialized = []
for value in obj:
deserialized.append(convert_custom_objects(value))
return deserialized
if isinstance(obj, dict):
deserialized = {}
for (key, value) in obj.items():
deserialized[key] = convert_custom_objects(value)
return deserialized
if (obj in custom_objects):
return custom_objects[obj]
return obj |
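A worked example of the recursive lookup. In the source, `custom_objects` is a variable from the enclosing scope; defining a stand-in mapping at module level here is a simplification:

def my_activation(x):  # stand-in custom object
    return x

custom_objects = {'my_activation': my_activation}

config = {'activation': 'my_activation', 'layers': ['my_activation', 'relu']}
print(convert_custom_objects(config))
# {'activation': <function my_activation>, 'layers': [<function my_activation>, 'relu']}
# Strings found in custom_objects are replaced; everything else ('relu') passes through.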
def run(datasets, splice_sites, sub_models, save, vis, iter, metrics, summary, config, num_folds, bal, imbal, imbal_t, imbal_f, batch_size, epochs):
"\n Parameters\n ----------\n dataset: a string {nn269, ce, hs3d} indicating which dataset to use\n splice_site_type: a string {acceptor, donor} indicating which splice\n site to train on\n model_architecture: a string {cnn, dnn, rnn} indicating which model\n architecture to use for training\n save_model: boolean, whether to save the current model\n bal: boolean, whether to balance the dataset\n summary: boolean, whether to print out the model architecture summary\n config: boolean, whether to print out the model's configuration\n visualize: boolean, whether to save a performance graph of the model\n metrics: boolean, whether to print out the evaluation metrics for the model\n num_folds: int (default 10), the number of folds for k-fold cross validation\n epochs: int (default 15), the number of epochs for the chosen model\n batch_size: int (default 32), the model batch size\n model_iter: integer, the iteration of the current model architecture (e.g.\n if this is the third cnn architecture you are testing, use 3)\n "
network_rows = {'acceptor': {'nn269': 90, 'ce': 141, 'hs3d': 140, 'hs2': 602, 'ce2': 602, 'dm': 602, 'ar': 602, 'or': 602}, 'donor': {'nn269': 15, 'ce': 141, 'hs3d': 140, 'hs2': 602, 'ce2': 602, 'dm': 602, 'ar': 602, 'or': 602}}
to_run = dict([(sub_model, {'nn269': '', 'ce': '', 'hs3d': '', 'hs2': '', 'ce2': '', 'dm': '', 'ar': '', 'or': ''}) for sub_model in sub_models])
results = copy.deepcopy(to_run)
for sub_model in sub_models:
for dataset in datasets:
to_run[sub_model][dataset] = encode_data.encode(dataset, sub_model, bal)
evals = dict([(sub_model, {'f1': '', 'precision': '', 'sensitivity': '', 'specificity': '', 'recall': '', 'mcc': '', 'err_rate': ''}) for sub_model in sub_models])
for sub_model in sub_models:
for dataset in datasets:
if (to_run[sub_model][dataset] == ''):
pass
else:
results[sub_model][dataset] = utils.cross_validation(num_folds, sub_model, splice_sites, dataset, to_run[sub_model][dataset], network_rows, evals, summary, config, batch_size, epochs, save)
print(results)
return results | 5,203,899,123,250,973,000 | Parameters
----------
dataset: a string {nn269, ce, hs3d} indicating which dataset to use
splice_site_type: a string {acceptor, donor} indicating which splice
site to train on
model_architecture: a string {cnn, dnn, rnn} indicating which model
architecture to use for training
save_model: boolean, whether to save the current model
bal: boolean, whether to balance the dataset
summary: boolean, whether to print out the model architecture summary
config: boolean, whether to print out the model's configuration
visualize: boolean, whether to save a performance graph of the model
metrics: boolean, whether to print out the evaluation metrics for the model
num_folds: int (default 10), the number of folds for k-fold cross validation
epochs: int (default 15), the number of epochs for the chosen model
batch_size: int (default 32), the model batch size
model_iter: integer, the iteration of the current model architecture (e.g.
if this is the third cnn architecture you are testing, use 3) | sub_models.py | run | tmartin2/EnsembleSplice-Inactive | python | def run(datasets, splice_sites, sub_models, save, vis, iter, metrics, summary, config, num_folds, bal, imbal, imbal_t, imbal_f, batch_size, epochs):
"\n Parameters\n ----------\n dataset: a string {nn269, ce, hs3d} indicating which dataset to use\n splice_site_type: a string {acceptor, donor} indicating which splice\n site to train on\n model_architecture: a string {cnn, dnn, rnn} indicating which model\n architecture to use for training\n save_model: boolean, whether to save the current model\n bal: boolean, whether to balance the dataset\n summary: boolean, whether to print out the model architecture summary\n config: boolean, whether to print out the model's configuration\n visualize: boolean, whether to save a performance graph of the model\n metrics: boolean, whether to print out the evaluation metrics for the model\n num_folds: int (default 10), the number of folds for k-fold cross validation\n epochs: int (default 15), the number of epochs for the chosen model\n batch_size: int (default 32), the model batch size\n model_iter: integer, the iteration of the current model architecture (e.g.\n if this is the third cnn architecture you are testing, use 3)\n "
network_rows = {'acceptor': {'nn269': 90, 'ce': 141, 'hs3d': 140, 'hs2': 602, 'ce2': 602, 'dm': 602, 'ar': 602, 'or': 602}, 'donor': {'nn269': 15, 'ce': 141, 'hs3d': 140, 'hs2': 602, 'ce2': 602, 'dm': 602, 'ar': 602, 'or': 602}}
to_run = dict([(sub_model, {'nn269': '', 'ce': '', 'hs3d': '', 'hs2': '', 'ce2': '', 'dm': '', 'ar': '', 'or': ''}) for sub_model in sub_models])
results = copy.deepcopy(to_run)
for sub_model in sub_models:
for dataset in datasets:
to_run[sub_model][dataset] = encode_data.encode(dataset, sub_model, bal)
evals = dict([(sub_model, {'f1': '', 'precision': '', 'sensitivity': '', 'specificity': '', 'recall': '', 'mcc': '', 'err_rate': ''}) for sub_model in sub_models])
for sub_model in sub_models:
for dataset in datasets:
if (to_run[sub_model][dataset] == ''):
pass
else:
results[sub_model][dataset] = utils.cross_validation(num_folds, sub_model, splice_sites, dataset, to_run[sub_model][dataset], network_rows, evals, summary, config, batch_size, epochs, save)
print(results)
return results |
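An illustrative invocation built only from the parameters the docstring documents; the argument values are assumptions, not a tested configuration:

results = run(datasets=['nn269'], splice_sites=['donor'], sub_models=['cnn'], save=False, vis=False, iter=1, metrics=True, summary=False, config=False, num_folds=10, bal=True, imbal=False, imbal_t=None, imbal_f=None, batch_size=32, epochs=15)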
@check_type(dict)
def get_note_text(note):
'Parses note content from different note types.\n\n :param dict note: an ArchivesSpace note.\n\n :returns: a list containing note content.\n :rtype: list\n '
def parse_subnote(subnote):
'Parses note content from subnotes.\n\n :param dict subnote: an ArchivesSpace subnote.\n\n :returns: a list containing subnote content.\n :rtype: list\n '
if (subnote['jsonmodel_type'] in ['note_orderedlist', 'note_index']):
content = subnote['items']
elif (subnote['jsonmodel_type'] in ['note_chronology', 'note_definedlist']):
content = []
for k in subnote['items']:
for i in k:
content += (k.get(i) if isinstance(k.get(i), list) else [k.get(i)])
else:
content = (subnote['content'] if isinstance(subnote['content'], list) else [subnote['content']])
return content
if (note['jsonmodel_type'] == 'note_singlepart'):
content = note['content']
elif (note['jsonmodel_type'] == 'note_bibliography'):
data = []
data += note['content']
data += note['items']
content = data
elif (note['jsonmodel_type'] == 'note_index'):
data = []
for item in note['items']:
data.append(item['value'])
content = data
else:
subnote_content_list = list((parse_subnote(sn) for sn in note['subnotes']))
content = [c for subnote_content in subnote_content_list for c in subnote_content]
return content | 9,174,333,711,460,146,000 | Parses note content from different note types.
:param dict note: an ArchivesSpace note.
:returns: a list containing note content.
:rtype: list | rac_aspace/data_helpers.py | get_note_text | RockefellerArchiveCenter/rac_aspace | python | @check_type(dict)
def get_note_text(note):
'Parses note content from different note types.\n\n :param dict note: an ArchivesSpace note.\n\n :returns: a list containing note content.\n :rtype: list\n '
def parse_subnote(subnote):
'Parses note content from subnotes.\n\n :param dict subnote: an ArchivesSpace subnote.\n\n :returns: a list containing subnote content.\n :rtype: list\n '
if (subnote['jsonmodel_type'] in ['note_orderedlist', 'note_index']):
content = subnote['items']
elif (subnote['jsonmodel_type'] in ['note_chronology', 'note_definedlist']):
content = []
for k in subnote['items']:
for i in k:
content += (k.get(i) if isinstance(k.get(i), list) else [k.get(i)])
else:
content = (subnote['content'] if isinstance(subnote['content'], list) else [subnote['content']])
return content
if (note['jsonmodel_type'] == 'note_singlepart'):
content = note['content']
elif (note['jsonmodel_type'] == 'note_bibliography'):
data = []
data += note['content']
data += note['items']
content = data
elif (note['jsonmodel_type'] == 'note_index'):
data = []
for item in note['items']:
data.append(item['value'])
content = data
else:
subnote_content_list = list((parse_subnote(sn) for sn in note['subnotes']))
content = [c for subnote_content in subnote_content_list for c in subnote_content]
return content |
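Worked examples for two of the note shapes the function handles; the dict layouts mirror ArchivesSpace's JSONModel but are written by hand here:

singlepart = {'jsonmodel_type': 'note_singlepart', 'content': ['Collection is open for research.']}
print(get_note_text(singlepart))
# ['Collection is open for research.']

multipart = {'jsonmodel_type': 'note_multipart', 'subnotes': [{'jsonmodel_type': 'note_text', 'content': 'Arranged alphabetically.'}]}
print(get_note_text(multipart))
# ['Arranged alphabetically.']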
@check_type(dict)
def text_in_note(note, query_string):
'Performs fuzzy searching against note text.\n\n :param dict note: an ArchivesSpace note.\n :param str query_string: a string to match against.\n\n :returns: True if a match is found for `query_string`, False if no match is\n found.\n :rtype: bool\n '
CONFIDENCE_RATIO = 97
'int: Minimum confidence ratio to match against.'
note_content = get_note_text(note)
ratio = fuzz.token_sort_ratio(' '.join([n.lower() for n in note_content]), query_string.lower(), score_cutoff=CONFIDENCE_RATIO)
return bool(ratio) | -4,301,119,841,621,400,000 | Performs fuzzy searching against note text.
:param dict note: an ArchivesSpace note.
:param str query_string: a string to match against.
:returns: True if a match is found for `query_string`, False if no match is
found.
:rtype: bool | rac_aspace/data_helpers.py | text_in_note | RockefellerArchiveCenter/rac_aspace | python | @check_type(dict)
def text_in_note(note, query_string):
'Performs fuzzy searching against note text.\n\n :param dict note: an ArchivesSpace note.\n :param str query_string: a string to match against.\n\n :returns: True if a match is found for `query_string`, False if no match is\n found.\n :rtype: bool\n '
CONFIDENCE_RATIO = 97
'int: Minimum confidence ratio to match against.'
note_content = get_note_text(note)
ratio = fuzz.token_sort_ratio(' '.join([n.lower() for n in note_content]), query_string.lower(), score_cutoff=CONFIDENCE_RATIO)
return bool(ratio) |
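A usage sketch. This assumes the module's `fuzz` import is rapidfuzz, whose `score_cutoff` keyword returns 0 below the threshold — which is what makes the `bool(ratio)` conversion above work:

note = {'jsonmodel_type': 'note_singlepart', 'content': ['Materials are restricted until 2050']}

print(text_in_note(note, 'materials are restricted until 2050'))  # True (score 100 clears the 97 cutoff)
print(text_in_note(note, 'completely unrelated text'))            # False (score_cutoff zeroes the ratio)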